code
stringlengths
17
6.64M
@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and
    Beyond' (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of the transform bottleneck.
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', standing for attention pooling
            and average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion methods for feature fusion.
            Options are 'channel_add' and 'channel_mul', standing for
            channelwise addition and multiplication respectively.
            Default: ('channel_add',).
    """

    _abbr_ = 'context_block'

    def __init__(self,
                 in_channels,
                 ratio,
                 pooling_type='att',
                 fusion_types=('channel_add', )):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([f in valid_fusion_types for f in fusion_types])
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        self.planes = int(in_channels * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if pooling_type == 'att':
            # A 1x1 conv produces one attention logit per spatial position.
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if 'channel_add' in fusion_types:
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_add_conv = None
        if 'channel_mul' in fusion_types:
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights; fusion branches start as near-identity maps."""
        if self.pooling_type == 'att':
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        if self.channel_add_conv is not None:
            last_zero_init(self.channel_add_conv)
        if self.channel_mul_conv is not None:
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Aggregate spatial context into a (N, C, 1, 1) descriptor."""
        batch, channel, height, width = x.size()
        if self.pooling_type == 'att':
            # [N, 1, C, H*W]
            input_x = x.view(batch, channel, height * width).unsqueeze(1)
            # [N, 1, H*W], softmax over spatial positions
            context_mask = self.conv_mask(x).view(batch, 1, height * width)
            context_mask = self.softmax(context_mask)
            # [N, 1, H*W, 1]
            context_mask = context_mask.unsqueeze(-1)
            # [N, 1, C, 1] -> [N, C, 1, 1]
            context = torch.matmul(input_x, context_mask)
            context = context.view(batch, channel, 1, 1)
        else:
            context = self.avg_pool(x)
        return context

    def forward(self, x):
        context = self.spatial_pool(x)
        out = x
        if self.channel_mul_conv is not None:
            # Channelwise gate in (0, 1).
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = out * channel_mul_term
        if self.channel_add_conv is not None:
            channel_add_term = self.channel_add_conv(context)
            out = out + channel_add_term
        return out
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a conv layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created conv layer.
    """
    if cfg is None:
        # Fall back to a plain Conv2d when no config is given.
        cfg_ = dict(type='Conv2d')
    else:
        if not isinstance(cfg, dict):
            raise TypeError('cfg must be a dict')
        if 'type' not in cfg:
            raise KeyError('the cfg dict must contain the key "type"')
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        raise KeyError(f'Unrecognized layer type {layer_type}')
    conv_layer = CONV_LAYERS.get(layer_type)

    # Positional/keyword args from the caller win over config entries only
    # in position; config keys are applied last as keywords.
    return conv_layer(*args, **kwargs, **cfg_)
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" padding.

    Pads the input (only when needed) so that it is fully covered by the
    filter at the given stride: with stride 1 the output spatial size
    equals the input size; with stride 2 it is halved, and so on.

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int or tuple): Size of the convolving kernel.
        stride (int or tuple, optional): Stride of the convolution.
            Default: 1.
        padding (int or tuple, optional): Ignored in practice; padding is
            computed dynamically in ``forward``. Default: 0.
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1.
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1.
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # The underlying Conv2d is built with zero static padding; the
        # "same"-style padding is applied per input in forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x):
        img_h, img_w = x.size()[-2:]
        kernel_h, kernel_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        output_h = math.ceil(img_h / stride_h)
        output_w = math.ceil(img_w / stride_w)
        pad_h = max(
            (output_h - 1) * self.stride[0] +
            (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)
        pad_w = max(
            (output_w - 1) * self.stride[1] +
            (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Split padding as evenly as possible, extra on bottom/right
            # (TensorFlow convention).
            x = F.pad(x, [
                pad_w // 2, pad_w - pad_w // 2,
                pad_h // 2, pad_h - pad_h // 2
            ])
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    Simplifies the common conv + norm (e.g. BatchNorm) + activation
    (e.g. ReLU) pattern. Built upon ``build_conv_layer()``,
    ``build_norm_layer()`` and ``build_activation_layer()``.

    Additional features:
    1. Automatically sets ``bias`` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported: before PyTorch 1.5 nn.Conv2d only
       supports zero and circular padding; a "reflect" mode is added here.

    Args:
        in_channels (int): Number of channels of the input feature map.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
        stride (int | tuple[int]): Stride of the convolution. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Default: 1.
        groups (int): Number of blocked connections from input channels to
            output channels. Default: 1.
        bias (bool | str): If 'auto', decided by ``norm_cfg``: True when
            ``norm_cfg`` is None, otherwise False. Default: 'auto'.
        conv_cfg (dict): Config dict for convolution layer. Default: None
            (uses conv2d).
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether to use spectral norm on the conv.
            Default: False.
        padding_mode (str): If unsupported by the current ``Conv2d``, an
            explicit padding layer is used instead. Officially supported:
            ['zeros', 'circular']; own implementation: ['reflect'].
            Default: 'zeros'.
        order (tuple[str]): Order of conv/norm/activation layers; a
            permutation of ("conv", "norm", "act").
            Default: ('conv', 'norm', 'act').
    """

    _abbr_ = 'conv_block'

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 padding_mode='zeros',
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        assert act_cfg is None or isinstance(act_cfg, dict)
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.with_explicit_padding = padding_mode not in official_padding_mode
        self.order = order
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])

        self.with_norm = norm_cfg is not None
        self.with_activation = act_cfg is not None
        # A bias is redundant when a norm layer follows the conv.
        if bias == 'auto':
            bias = not self.with_norm
        self.with_bias = bias

        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)

        # Explicit padding makes the conv's own padding unnecessary.
        conv_padding = 0 if self.with_explicit_padding else padding
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=conv_padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Mirror the wrapped conv's attributes for convenient access.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups

        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)

        if self.with_norm:
            # Norm after conv normalizes out_channels; before, in_channels.
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn(
                        'Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None

        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            # These activations do not accept an 'inplace' argument.
            if act_cfg_['type'] not in [
                    'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
            ]:
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)

        self.init_weights()

    @property
    def norm(self):
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        # Conv layers that define their own init_weights (e.g. custom
        # registered convs) are left to initialize themselves.
        if not hasattr(self.conv, 'init_weights'):
            if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        for layer in self.order:
            if layer == 'conv':
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
               groups=1, eps=1e-05):
    """Conv2d with Weight Standardization.

    The kernel is standardized per output channel (zero mean, unit std,
    stabilized by ``eps``) before the ordinary convolution is applied.

    Args:
        input (Tensor): Input feature map.
        weight (Tensor): Convolution kernel of shape (C_out, ...).
        bias (Tensor, optional): Bias term. Default: None.
        stride, padding, dilation, groups: Same as ``F.conv2d``.
        eps (float): Small constant added to the std for numerical
            stability. Default: 1e-05.

    Returns:
        Tensor: Convolution output using the standardized weight.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mean = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    std = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mean) / (std + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """Conv2d layer with Weight Standardization applied in ``forward``.

    Args:
        in_channels, out_channels, kernel_size, stride, padding, dilation,
        groups, bias: Same as ``nn.Conv2d``.
        eps (float): Small constant added to the weight std for numerical
            stability. Default: 1e-05.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-05):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        # Standardize the weight on every call so it tracks training updates.
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization).

    A variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf) used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf).

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int or tuple): Size of the conv kernel.
        stride (int or tuple, optional): Stride of the convolution.
            Default: 1.
        padding (int or tuple, optional): Zero-padding added to both sides
            of the input. Default: 0.
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1.
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1.
        bias (bool, optional): If True, adds a learnable bias to the
            output. Default: True.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Per-channel affine terms; registered as buffers so they are
        # saved/loaded with the state_dict but receive no gradient.
        self.register_buffer('weight_gamma',
                             torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta',
                             torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        """Standardize ``weight`` per output channel, then re-apply the
        stored affine (gamma, beta)."""
        flat = weight.view(weight.size(0), -1)
        mean = flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(flat.var(dim=1) + 1e-05).view(-1, 1, 1, 1)
        standardized = (weight - mean) / std
        return self.weight_gamma * standardized + self.weight_beta

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Override the default load function.

        Recovers ``weight_gamma`` and ``weight_beta`` when they are missing
        from the checkpoint: if both are present, defer entirely to
        ``super()._load_from_state_dict``; otherwise compute the mean and
        std of the pretrained weights and store them in ``weight_beta`` and
        ``weight_gamma`` respectively.
        """
        # Sentinel: gamma is strictly positive after a successful load, so
        # filling with -1 lets us detect whether the checkpoint provided it.
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, local_missing_keys,
                                      unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            # Checkpoint contained gamma/beta; report missing keys as-is.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # Derive gamma/beta from the pretrained (non-AWS) weights.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-05).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # gamma/beta have now been reconstructed — drop them from the
        # missing-keys report.
        missing_gamma_beta = [
            k for k in local_missing_keys
            if k.endswith('weight_gamma') or k.endswith('weight_beta')
        ]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    Replaces a single ConvModule with a depthwise ConvModule
    (``groups == in_channels``) followed by a pointwise 1x1 ConvModule.
    Each of the two blocks gets norm/activation layers when the
    corresponding configs are specified.

    Args:
        in_channels (int): Number of channels of the input feature map.
        out_channels (int): Number of channels produced by the convolution.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
        stride (int | tuple[int]): Stride of the convolution. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Default: 1.
        norm_cfg (dict): Default norm config for both depthwise and
            pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise and
            pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of the depthwise ConvModule;
            'default' means inherit ``norm_cfg``. Default: 'default'.
        dw_act_cfg (dict): Activation config of the depthwise ConvModule;
            'default' means inherit ``act_cfg``. Default: 'default'.
        pw_norm_cfg (dict): Norm config of the pointwise ConvModule;
            'default' means inherit ``norm_cfg``. Default: 'default'.
        pw_act_cfg (dict): Activation config of the pointwise ConvModule;
            'default' means inherit ``act_cfg``. Default: 'default'.
        kwargs (optional): Other shared arguments for both ConvModules.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        # 'default' means: inherit the shared norm_cfg / act_cfg.
        dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
        dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
        pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
        pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg

        # Depthwise: one filter per input channel (groups == in_channels).
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)
        # Pointwise: 1x1 conv mixing channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x
def drop_path(x, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main
    path of residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501

    Args:
        x (Tensor): Input of any shape; samples are indexed along dim 0.
        drop_prob (float): Probability of zeroing a sample's path.
            Default: 0.0.
        training (bool): Dropping only happens in training mode.
            Default: False.

    Returns:
        Tensor: ``x`` with each sample's path either kept (rescaled by
        ``1 / (1 - drop_prob)`` to preserve the expectation) or zeroed.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(
        shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize to 0/1
    if keep_prob > 0.0:
        # Rescale kept paths so E[output] == x. Guarding keep_prob == 0
        # avoids 0 * inf = NaN when drop_prob == 1.0 (original code
        # produced NaN instead of the correct all-zeros output).
        x = x.div(keep_prob)
    return x * random_tensor
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main
    path of residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed.
            Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Dropping is only active while the module is in training mode.
        return drop_path(x, self.drop_prob, self.training)
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """A wrapper for ``torch.nn.Dropout`` that renames ``p`` to
    ``drop_prob`` for consistency with ``DropPath``.

    Args:
        drop_prob (float): Probability of the elements to be zeroed.
            Default: 0.5.
        inplace (bool): Do the operation inplace or not. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        super().__init__(p=drop_prob, inplace=inplace)
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers.

    Args:
        cfg (dict): The dropout layer config (must contain ``type``).
        default_args (dict, optional): Default arguments merged into the
            config before instantiation. Default: None.

    Returns:
        nn.Module: Created dropout layer.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard Sigmoid Module.

    Applies ``Hsigmoid(x) = min(max((x + bias) / divisor, min_value),
    max_value)``; with the defaults this is
    ``min(max((x + 3) / 6, 0), 1)``.

    Note:
        In MMCV v1.4.4, the default args were modified to align with the
        PyTorch official implementation.

    Args:
        bias (float): Bias of the input feature map. Default: 3.0.
        divisor (float): Divisor of the input feature map. Default: 6.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=3.0, divisor=6.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        warnings.warn(
            'In MMCV v1.4.4, we modified the default value of args to align '
            'with PyTorch official. Previous Implementation: '
            'Hsigmoid(x) = min(max((x + 1) / 2, 0), 1). '
            'Current Implementation: '
            'Hsigmoid(x) = min(max((x + 3) / 6, 0), 1).')
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        x = (x + self.bias) / self.divisor
        # In-place clamp is safe: x is a fresh tensor created just above.
        return x.clamp_(self.min_value, self.max_value)
class HSwish(nn.Module):
    """Hard Swish Module.

    Applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): Can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        return x * self.act(x + 3) / 6
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    Proposed in "Non-local Neural Networks"
    (https://arxiv.org/abs/1711.07971); code reference:
    https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            ``1/sqrt(inter_channels)`` when mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): Config dict for convolution layers; None
            means ``nn.Conv2d``. Default: None.
        norm_cfg (None | dict): Config dict for normalization layers
            (applies only to ``conv_out``). Default: None.
        mode (str): One of `gaussian`, `concatenation`,
            `embedded_gaussian`, `dot_product`.
            Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        self.inter_channels = max(in_channels // reduction, 1)
        self.mode = mode

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product',
                'concatenation'
        ]:
            raise ValueError(
                f"Mode should be in 'gaussian', 'concatenation', 'embedded_gaussian' or 'dot_product', but got {mode} instead.")

        # g projects the values; conv_out maps back to in_channels.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # Gaussian mode uses the raw input as query/key, so theta/phi
        # projections are only needed for the other modes.
        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        # Zero-init of the output branch makes the block start as identity.
        if zeros_init:
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        elif self.conv_out.norm_cfg is None:
            normal_init(self.conv_out.conv, std=std)
        else:
            normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        # Softmax over the key positions.
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is the channel dim of the projection.
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        pairwise_weight = torch.matmul(theta_x, phi_x)
        # Normalize by the number of positions instead of a softmax.
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        h = theta_x.size(2)
        w = phi_x.size(3)
        # Tile queries and keys to form all (query, key) pairs.
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)
        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def forward(self, x):
        n = x.size(0)

        # (N, HxW..., C_inter) value tensor.
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE(review): `self.sub_sample` is set by the 1d/2d/3d
            # subclasses, not by this base class.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        # Dispatch to the pairwise function matching self.mode.
        pairwise_func = getattr(self, self.mode)
        pairwise_weight = pairwise_func(theta_x, phi_x)

        y = torch.matmul(pairwise_weight, g_x)
        # Restore the original spatial layout.
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        output = x + self.conv_out(y)
        return output
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after the pairwise
            function (applied on the spatial dimension only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if sub_sample:
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                # Gaussian mode has no phi projection, so subsampling
                # reduces to the pooling alone.
                self.phi = max_pool_layer
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after the pairwise
            function (applied on the spatial dimensions only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if sub_sample:
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                # Gaussian mode has no phi projection, so subsampling
                # reduces to the pooling alone.
                self.phi = max_pool_layer
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after the pairwise
            function (applied on the spatial dimensions only — note the
            temporal dimension is not pooled).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if sub_sample:
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                # Gaussian mode has no phi projection, so subsampling
                # reduces to the pooling alone.
                self.phi = max_pool_layer
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    When we build a norm layer with `build_norm_layer()`, we want to
    preserve the norm type in variable names, e.g, self.bn1, self.gn.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, the abbreviation is "bn", "gn", "ln" and "in"
    respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or
    "instance", the abbreviation is "bn", "gn", "ln" and "in" respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # Order matters: _InstanceNorm is checked before _BatchNorm
    # (historically _InstanceNorm subclasses _BatchNorm in PyTorch).
    for base, abbr in ((_InstanceNorm, 'in'), (_BatchNorm, 'bn'),
                       (nn.GroupNorm, 'gn'), (nn.LayerNorm, 'ln')):
        if issubclass(class_type, base):
            return abbr
    lowered = class_type.__name__.lower()
    for token, abbr in (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'),
                        ('instance', 'in')):
        if token in lowered:
            return abbr
    return 'norm_layer'
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm
            abbreviation to create a named layer.

    Returns:
        tuple[str, nn.Module]: The first element is the layer name
        consisting of abbreviation and postfix, e.g., bn1, gn. The second
        element is the created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')

    norm_layer = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(norm_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-05)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
        # Legacy SyncBN API: tell it how many GPUs each process group uses.
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        # GroupNorm takes `num_channels` as a keyword and additionally
        # requires `num_groups` in the config.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
def is_norm(layer, exclude=None):
    """Check whether a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type], optional): Types to be excluded from
            the check even if they are norm layers.

    Returns:
        bool: Whether the layer is a norm layer.

    Raises:
        TypeError: If ``exclude`` is neither None, a type, nor a tuple
            of types.
    """
    if exclude is not None:
        exclude = exclude if isinstance(exclude, tuple) else (exclude,)
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, but got {type(exclude)}: {exclude}')
        if exclude and isinstance(layer, exclude):
            return False
    norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
    return isinstance(layer, norm_bases)
def build_padding_layer(cfg, *args, **kwargs):
    """Build a padding layer from a config dict.

    Args:
        cfg (dict): The padding layer config, containing ``type`` (a key
            in ``PADDING_LAYERS``) plus any constructor kwargs.
        args: Positional arguments forwarded to the layer constructor.
        kwargs: Keyword arguments forwarded to the layer constructor.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()
    padding_type = cfg_.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    padding_cls = PADDING_LAYERS.get(padding_type)
    return padding_cls(*args, **kwargs, **cfg_)
def infer_abbr(class_type):
    """Infer the abbreviation of a plugin layer class.

    Rule 1: if the class defines ``_abbr_``, return it.
    Rule 2: otherwise fall back to the snake-case form of the class name,
    e.g. ``FancyBlock`` -> ``fancy_block``.

    Args:
        class_type (type): The plugin layer type.

    Returns:
        str: The inferred abbreviation.

    Raises:
        TypeError: If ``class_type`` is not a class.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # CamelCase -> snake_case, adapted from the `inflection` library's
    # `underscore` helper.
    name = class_type.__name__
    name = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', name)
    name = re.sub('([a-z\\d])([A-Z])', r'\1_\2', name)
    return name.replace('-', '_').lower()
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build a plugin layer from a config dict.

    Args:
        cfg (dict): Contains ``type`` (a key in ``PLUGIN_LAYERS``) plus
            constructor kwargs for the plugin layer.
        postfix (int | str): Appended to the inferred abbreviation to
            form the layer name. Default: ''.
        kwargs: Extra keyword arguments forwarded to the constructor.

    Returns:
        tuple[str, nn.Module]: The layer name (abbreviation + postfix)
        and the created plugin layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    name = infer_abbr(plugin_layer) + str(postfix)
    assert isinstance(postfix, (int, str))
    layer = plugin_layer(**kwargs, **cfg_)
    return name, layer
class Scale(nn.Module):
    """Multiply the input by a single learnable scalar.

    The scale is a 0-dim parameter, so broadcasting lets it scale an
    input tensor of any shape.

    Args:
        scale (float): Initial value of the scale factor. Default: 1.0.
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return self.scale * x
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
    """Swish activation.

    Applies ``Swish(x) = x * Sigmoid(x)`` elementwise.

    Returns:
        Tensor: The output tensor, same shape as the input.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
def build_positional_encoding(cfg, default_args=None):
    """Build a positional encoding module from its registry config."""
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args=default_args)
def build_attention(cfg, default_args=None):
    """Build an attention module from its registry config."""
    return build_from_cfg(cfg, ATTENTION, default_args=default_args)
def build_feedforward_network(cfg, default_args=None):
    """Build a feed-forward network (FFN) from its registry config."""
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args=default_args)
def build_transformer_layer(cfg, default_args=None):
    """Build a transformer layer from its registry config."""
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args=default_args)
def build_transformer_layer_sequence(cfg, default_args=None):
    """Build a transformer encoder/decoder from its registry config."""
    return build_from_cfg(
        cfg, TRANSFORMER_LAYER_SEQUENCE, default_args=default_args)
class AdaptivePadding(nn.Module):
    """Zero-pad the input so the given filter fully covers it.

    Two modes are supported: ``"same"`` distributes the padding around
    the input (like TensorFlow's SAME padding), while ``"corner"`` puts
    all the padding at the bottom-right.

    Args:
        kernel_size (int | tuple): Size of the kernel. Default: 1.
        stride (int | tuple): Stride of the filter. Default: 1.
        dilation (int | tuple): Spacing between kernel elements.
            Default: 1.
        padding (str): Either "same" or "corner". Default: "corner".

    Example:
        >>> adap_pad = AdaptivePadding(16, 16, 1, 'corner')
        >>> out = adap_pad(torch.rand(1, 1, 15, 17))
        >>> assert (out.shape[2], out.shape[3]) == (16, 32)
    """

    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
        super(AdaptivePadding, self).__init__()
        assert padding in ('same', 'corner')
        self.padding = padding
        self.kernel_size = to_2tuple(kernel_size)
        self.stride = to_2tuple(stride)
        self.dilation = to_2tuple(dilation)

    def get_pad_shape(self, input_shape):
        """Return ``(pad_h, pad_w)`` required for an ``(H, W)`` input."""
        input_h, input_w = input_shape
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.stride
        # Number of filter applications needed to cover each axis.
        output_h = math.ceil(input_h / stride_h)
        output_w = math.ceil(input_w / stride_w)
        # Span of the last filter application minus the input extent,
        # clamped at zero when the input is already covered.
        pad_h = max(
            (output_h - 1) * stride_h +
            (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
        pad_w = max(
            (output_w - 1) * stride_w +
            (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
        return pad_h, pad_w

    def forward(self, x):
        """Pad ``x`` of shape (B, C, H, W) according to the mode."""
        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
        if pad_h <= 0 and pad_w <= 0:
            return x
        if self.padding == 'corner':
            # All padding goes to the right/bottom edges.
            return F.pad(x, [0, pad_w, 0, pad_h])
        # 'same': split the padding as evenly as possible on both sides.
        return F.pad(x, [
            pad_w // 2, pad_w - pad_w // 2,
            pad_h // 2, pad_h - pad_h // 2
        ])
class PatchEmbed(BaseModule):
    """Image-to-patch embedding implemented with a (strided) conv layer.

    Args:
        in_channels (int): Number of input channels. Default: 3.
        embed_dims (int): Embedding dimension. Default: 768.
        conv_type (str): Conv type used for the projection.
            Default: "Conv2d".
        kernel_size (int): Kernel size of the embedding conv. Default: 16.
        stride (int): Stride of the embedding conv; falls back to
            ``kernel_size`` when None. Default: 16.
        padding (int | tuple | str): Padding of the embedding conv. A
            string selects adaptive padding ("same" or "corner").
            Default: "corner".
        dilation (int): Dilation of the embedding conv. Default: 1.
        bias (bool): Whether the conv has a bias. Default: True.
        norm_cfg (dict, optional): Norm layer config applied to the
            flattened embedding. Default: None.
        input_size (int | tuple, optional): Static input size used to
            precompute ``init_out_size``. Default: None.
        init_cfg (dict, optional): Initialization config. Default: None.
    """

    def __init__(self, in_channels=3, embed_dims=768, conv_type='Conv2d',
                 kernel_size=16, stride=16, padding='corner', dilation=1,
                 bias=True, norm_cfg=None, input_size=None, init_cfg=None):
        super(PatchEmbed, self).__init__(init_cfg=init_cfg)
        self.embed_dims = embed_dims
        if (stride is None):
            stride = kernel_size
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        if isinstance(padding, str):
            # String padding selects adaptive padding; the conv itself
            # then pads with 0.
            self.adaptive_padding = AdaptivePadding(
                kernel_size=kernel_size, stride=stride, dilation=dilation,
                padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)
        self.projection = build_conv_layer(
            dict(type=conv_type), in_channels=in_channels,
            out_channels=embed_dims, kernel_size=kernel_size, stride=stride,
            padding=padding, dilation=dilation, bias=bias)
        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
        else:
            self.norm = None
        if input_size:
            input_size = to_2tuple(input_size)
            self.init_input_size = input_size
            if self.adaptive_padding:
                # Account for adaptive padding before applying the
                # standard conv output-size formula below.
                (pad_h, pad_w) = self.adaptive_padding.get_pad_shape(
                    input_size)
                (input_h, input_w) = input_size
                input_h = (input_h + pad_h)
                input_w = (input_w + pad_w)
                input_size = (input_h, input_w)
            # Standard conv output-size formula.
            h_out = (((((input_size[0] + (2 * padding[0])) - (dilation[0] *
                     (kernel_size[0] - 1))) - 1) // stride[0]) + 1)
            w_out = (((((input_size[1] + (2 * padding[1])) - (dilation[1] *
                     (kernel_size[1] - 1))) - 1) // stride[1]) + 1)
            self.init_out_size = (h_out, w_out)
        else:
            self.init_input_size = None
            self.init_out_size = None

    def forward(self, x):
        """Project an image batch to patch embeddings.

        Args:
            x (Tensor): Input of shape (B, C, H, W).

        Returns:
            tuple: ``(x, out_size)`` where ``x`` has shape
            (B, out_h * out_w, embed_dims) and ``out_size`` is
            ``(out_h, out_w)``.
        """
        if self.adaptive_padding:
            x = self.adaptive_padding(x)
        x = self.projection(x)
        out_size = (x.shape[2], x.shape[3])
        # Flatten spatial dims and move channels last: (B, HW, C).
        x = x.flatten(2).transpose(1, 2)
        if (self.norm is not None):
            x = self.norm(x)
        return (x, out_size)
class PatchMerging(BaseModule):
    """Merge patch feature map (as used in Swin Transformer).

    Groups the feature map with ``nn.Unfold`` (kernel_size window), then
    applies an optional norm and a linear reduction to ``out_channels``.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int | tuple, optional): Unfold kernel size.
            Default: 2.
        stride (int | tuple, optional): Unfold stride; falls back to
            ``kernel_size`` when None. Default: None.
        padding (int | tuple | str): Unfold padding. A string selects
            adaptive padding ("same" or "corner"). Default: "corner".
        dilation (int | tuple, optional): Unfold dilation. Default: 1.
        bias (bool, optional): Bias of the reduction linear layer.
            Default: False.
        norm_cfg (dict, optional): Norm layer config. Default: dict(type='LN').
        init_cfg (dict, optional): Initialization config. Default: None.
    """

    def __init__(self, in_channels, out_channels, kernel_size=2, stride=None,
                 padding='corner', dilation=1, bias=False,
                 norm_cfg=dict(type='LN'), init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Stride defaults to kernel_size when not given.
        if stride:
            stride = stride
        else:
            stride = kernel_size
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        if isinstance(padding, str):
            # String padding selects adaptive padding; the unfold itself
            # then pads with 0.
            self.adaptive_padding = AdaptivePadding(
                kernel_size=kernel_size, stride=stride, dilation=dilation,
                padding=padding)
            padding = 0
        else:
            self.adaptive_padding = None
        padding = to_2tuple(padding)
        self.sampler = nn.Unfold(
            kernel_size=kernel_size, dilation=dilation, padding=padding,
            stride=stride)
        # Each unfolded column stacks kernel_h * kernel_w * C values.
        sample_dim = ((kernel_size[0] * kernel_size[1]) * in_channels)
        if (norm_cfg is not None):
            self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
        else:
            self.norm = None
        self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)

    def forward(self, x, input_size):
        """Merge patches of ``x``.

        Args:
            x (Tensor): Input of shape (B, H*W, C_in).
            input_size (tuple[int]): Spatial shape of x, as (H, W).

        Returns:
            tuple: ``(x, output_size)`` where ``x`` has shape
            (B, merged_h * merged_w, C_out) and ``output_size`` is
            ``(merged_h, merged_w)``.
        """
        (B, L, C) = x.shape
        assert isinstance(input_size, Sequence), f'Expect input_size is `Sequence` but get {input_size}'
        (H, W) = input_size
        assert (L == (H * W)), 'input feature has wrong size'
        # Back to image layout (B, C, H, W) for unfold.
        x = x.view(B, H, W, C).permute([0, 3, 1, 2])
        if self.adaptive_padding:
            x = self.adaptive_padding(x)
            # H/W may have grown; recompute before the size formula.
            (H, W) = x.shape[(- 2):]
        x = self.sampler(x)
        # Standard sliding-window output-size formula (see nn.Unfold docs).
        out_h = (((((H + (2 * self.sampler.padding[0])) -
                 (self.sampler.dilation[0] *
                  (self.sampler.kernel_size[0] - 1))) - 1) //
                 self.sampler.stride[0]) + 1)
        out_w = (((((W + (2 * self.sampler.padding[1])) -
                 (self.sampler.dilation[1] *
                  (self.sampler.kernel_size[1] - 1))) - 1) //
                 self.sampler.stride[1]) + 1)
        output_size = (out_h, out_w)
        # (B, sample_dim, out_h*out_w) -> (B, out_h*out_w, sample_dim)
        x = x.transpose(1, 2)
        x = (self.norm(x) if self.norm else x)
        x = self.reduction(x)
        return (x, output_size)
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
    """Wrapper for ``torch.nn.MultiheadAttention``.

    Adds an identity (residual) connection, dropout on the shortcut
    branch, and optional positional encodings for query/key.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): Dropout on the attention weights. Default: 0.0.
        proj_drop (float): Dropout after the attention output. Default: 0.0.
        dropout_layer (dict): Dropout config used on the shortcut branch.
            Default: dict(type='Dropout', drop_prob=0.).
        init_cfg (dict, optional): Initialization config. Default: None.
        batch_first (bool): If True, inputs are (batch, n, embed_dim);
            otherwise (n, batch, embed_dim). Default: False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 dropout_layer=dict(type='Dropout', drop_prob=0.0),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__(init_cfg)
        if 'dropout' in kwargs:
            warnings.warn('The arguments `dropout` in MultiheadAttention has been deprecated, now you can separately set `attn_drop`(float), proj_drop(float), and `dropout_layer`(dict) ', DeprecationWarning)
            attn_drop = kwargs['dropout']
            # BUGFIX: copy before writing. `dropout_layer` may be the
            # shared default dict; mutating it in place would change the
            # default drop_prob for every later instance.
            dropout_layer = dict(dropout_layer)
            dropout_layer['drop_prob'] = kwargs.pop('dropout')
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()

    @deprecated_api_warning({'residual': 'identity'},
                            cls_name='MultiheadAttention')
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        Args:
            query (Tensor): Shape [num_queries, bs, embed_dims] if
                ``self.batch_first`` is False, else
                [bs, num_queries, embed_dims].
            key (Tensor, optional): Same layout as ``query``; defaults to
                ``query`` when None.
            value (Tensor, optional): Same shape as ``key``; defaults to
                ``key`` when None.
            identity (Tensor, optional): Residual input; defaults to
                ``query`` when None.
            query_pos (Tensor, optional): Positional encoding added to
                ``query``.
            key_pos (Tensor, optional): Positional encoding added to
                ``key``. When None and ``query_pos`` matches ``key``'s
                shape, ``query_pos`` is reused.
            attn_mask (Tensor, optional): Mask of shape
                [num_queries, num_keys].
            key_padding_mask (Tensor, optional): Mask of shape
                [bs, num_keys].

        Returns:
            Tensor: ``identity`` plus the (dropped-out) attention output,
            same layout as ``query``.
        """
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None and query_pos is not None:
            # Reuse the query positional encoding for the key only when
            # the shapes match, otherwise warn.
            if query_pos.shape == key.shape:
                key_pos = query_pos
            else:
                # BUGFIX: message previously read "key ismissing".
                warnings.warn(f'position encoding of key is missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos

        if self.batch_first:
            # nn.MultiheadAttention here expects (n, batch, embed_dim).
            query = query.transpose(0, 1)
            key = key.transpose(0, 1)
            value = value.transpose(0, 1)

        out = self.attn(
            query=query,
            key=key,
            value=value,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]

        if self.batch_first:
            out = out.transpose(0, 1)

        return identity + self.dropout_layer(self.proj_drop(out))
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
    """Feed-forward network (FFN) with an identity connection.

    Args:
        embed_dims (int): The feature dimension. Default: 256.
        feedforward_channels (int): Hidden dimension of the FFN.
            Default: 1024.
        num_fcs (int, optional): Number of fully-connected layers;
            must be >= 2. Default: 2.
        act_cfg (dict, optional): Activation config.
            Default: dict(type='ReLU', inplace=True).
        ffn_drop (float, optional): Dropout probability inside the FFN.
            Default: 0.0.
        dropout_layer (dict, optional): Dropout config applied on the
            shortcut branch; identity when None.
        add_identity (bool, optional): Whether to add the identity
            connection. Default: True.
        init_cfg (dict, optional): Initialization config. Default: None.
    """

    @deprecated_api_warning(
        {'dropout': 'ffn_drop', 'add_residual': 'add_identity'},
        cls_name='FFN')
    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_drop=0.0,
                 dropout_layer=None,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert num_fcs >= 2, f'num_fcs should be no less than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)

        # (num_fcs - 1) hidden blocks: Linear -> activation -> dropout,
        # then a final projection back to embed_dims plus dropout.
        stacked = []
        in_channels = embed_dims
        for _ in range(num_fcs - 1):
            stacked.append(
                Sequential(
                    Linear(in_channels, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        stacked.append(Linear(feedforward_channels, embed_dims))
        stacked.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*stacked)
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else torch.nn.Identity()
        self.add_identity = add_identity

    @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Run the FFN; add ``identity`` (defaults to ``x``) unless the
        identity connection is disabled."""
        out = self.dropout_layer(self.layers(x))
        if not self.add_identity:
            return out
        return (x if identity is None else identity) + out
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base `TransformerLayer` for vision transformer.

    Executes the operations listed in ``operation_order`` — any sequence
    drawn from {'self_attn', 'norm', 'ffn', 'cross_attn'}. Putting
    'norm' first yields a pre-norm layer (see "On Layer Normalization in
    the Transformer Architecture", https://arxiv.org/abs/2002.04745).

    Args:
        attn_cfgs (list[dict] | dict | None): Configs for the attention
            modules, in operation order. A single dict is replicated for
            every attention. Default: None.
        ffn_cfgs (list[dict] | dict): Configs for the FFNs, in operation
            order. A single dict is replicated for every FFN.
        operation_order (tuple[str]): Execution order of operations,
            e.g. ('self_attn', 'norm', 'ffn', 'norm'). Default: None.
        norm_cfg (dict): Norm layer config. Default: dict(type='LN').
        init_cfg (dict, optional): Initialization config. Default: None.
        batch_first (bool): Whether inputs are (batch, n, embed_dim)
            rather than (n, batch, embed_dim). Default: False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.0,
                     act_cfg=dict(type='ReLU', inplace=True)),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):
        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for ori_name, new_name in deprecated_args.items():
            if ori_name in kwargs:
                warnings.warn(f'The arguments `{ori_name}` in BaseTransformerLayer has been deprecated, now you should set `{new_name}` and other FFN related arguments to a dict named `ffn_cfgs`. ', DeprecationWarning)
                # BUGFIX: copy before writing. `ffn_cfgs` may be the
                # shared default dict; writing the deprecated value into
                # it in place would leak into every later instance.
                ffn_cfgs = copy.deepcopy(ffn_cfgs)
                ffn_cfgs[new_name] = kwargs[ori_name]

        super(BaseTransformerLayer, self).__init__(init_cfg)
        self.batch_first = batch_first
        assert (set(operation_order) & set(['self_attn', 'norm', 'ffn', 'cross_attn'])) == set(operation_order), f"The operation_order of {self.__class__.__name__} should contains all four operation type {['self_attn', 'norm', 'ffn', 'cross_attn']}"

        num_attn = operation_order.count('self_attn') + \
            operation_order.count('cross_attn')
        if isinstance(attn_cfgs, dict):
            # A single dict is replicated (deep-copied) per attention.
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length of attn_cfg {num_attn} is not consistent with the number of attentionin operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = ModuleList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                # NOTE(review): when `attn_cfgs` is a caller-provided
                # list, the write below mutates the caller's dicts —
                # pre-existing upstream behavior, kept for compatibility.
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Tag the module so callers can tell self/cross apart.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            # ConfigDict is still a dict: replicate per FFN.
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(
                build_feedforward_network(ffn_cfgs[ffn_index],
                                          dict(type='FFN')))

        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Run the configured operation sequence.

        Args:
            query (Tensor): [num_queries, bs, embed_dims] (or batch-first
                layout when ``self.batch_first``).
            key (Tensor, optional): Keys for cross attention.
            value (Tensor, optional): Values for cross attention.
            query_pos (Tensor, optional): Positional encoding for query.
            key_pos (Tensor, optional): Positional encoding for key.
            attn_masks (list[Tensor] | Tensor | None): One mask per
                attention; a single tensor is replicated.
            query_key_padding_mask (Tensor, optional): [bs, num_queries],
                used only by self-attention.
            key_padding_mask (Tensor, optional): [bs, num_keys], used
                only by cross-attention.

        Returns:
            Tensor: Result with the same layout as ``query``.
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            warnings.warn(f'Use same attn_mask in all attentions in {self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of attn_masks {len(attn_masks)} must be equal to the number of attention in operation_order {self.num_attn}'

        for layer in self.operation_order:
            if layer == 'self_attn':
                temp_key = temp_value = query
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1
            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1
        return query
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
    """Base class for TransformerEncoder and TransformerDecoder.

    Stacks ``num_layers`` transformer layers built from
    ``transformerlayers``; a single config dict is replicated for every
    layer.

    Args:
        transformerlayers (list[dict] | dict): Config(s) of the
            transformer layer(s). Default: None.
        num_layers (int): Number of layers. Default: None.
        init_cfg (dict, optional): Initialization config. Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None,
                 init_cfg=None):
        super(TransformerLayerSequence, self).__init__(init_cfg)
        if isinstance(transformerlayers, dict):
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = ModuleList()
        for layer_cfg in transformerlayers:
            self.layers.append(build_transformer_layer(layer_cfg))
        # Expose the first layer's properties for convenience.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self, query, key, value, query_pos=None, key_pos=None,
                attn_masks=None, query_key_padding_mask=None,
                key_padding_mask=None, **kwargs):
        """Sequentially apply every layer to ``query``.

        Args:
            query/key/value (Tensor): (num_queries|num_keys, bs,
                embed_dims) tensors passed to each layer.
            query_pos, key_pos (Tensor, optional): Positional encodings.
            attn_masks (list[Tensor], optional): One mask per attention.
            query_key_padding_mask, key_padding_mask (Tensor, optional):
                Padding masks for self-/cross-attention.

        Returns:
            Tensor: Result of shape [num_queries, bs, embed_dims].
        """
        for layer in self.layers:
            query = layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel-shuffle upsample layer.

    A conv expands the channels by ``scale_factor ** 2`` and
    ``F.pixel_shuffle`` folds them into the spatial dimensions.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the channel-expanding conv.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # Padding keeps H and W unchanged for odd kernel sizes.
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        return F.pixel_shuffle(self.upsample_conv(x), self.scale_factor)
def build_upsample_layer(cfg, *args, **kwargs):
    """Build an upsample layer from a config dict.

    Args:
        cfg (dict): Contains ``type`` (a key in ``UPSAMPLE_LAYERS``),
            optionally ``scale_factor`` (not applicable to deconv), and
            any other constructor kwargs.
        args: Positional arguments forwarded to the constructor.
        kwargs: Keyword arguments forwarded to the constructor.

    Returns:
        nn.Module: Created upsample layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(f'the cfg dict must contain the key "type", but got {cfg}')
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')

    upsample = UPSAMPLE_LAYERS.get(layer_type)
    if upsample is nn.Upsample:
        # nn.Upsample is registered under its interpolation-mode names,
        # so forward the registry key as its `mode` argument.
        cfg_['mode'] = layer_type
    return upsample(*args, **kwargs, **cfg_)
def obsolete_torch_version(torch_version, version_threshold):
    """Return True for 'parrots' or any torch release at/below threshold."""
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd-aware creation of an uninitialized tensor with a new shape.

    Forward returns ``x.new_empty(new_shape)``; backward mirrors the
    operation, producing an empty gradient shaped like the original input.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the input shape so backward can restore it.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        return NewEmptyTensorOp.apply(grad, ctx.shape), None
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
    """``nn.Conv2d`` that tolerates empty (zero-element) inputs.

    Older PyTorch (<= 1.4, and parrots) cannot run convolutions on empty
    batches; this subclass short-circuits that case by computing the
    output shape by hand and returning an empty tensor of that shape.
    """

    def forward(self, x):
        # Workaround only for empty input on affected torch versions.
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
            out_shape = [x.shape[0], self.out_channels]
            # Standard conv output-size formula per spatial dim:
            # o = (i + 2p - (d*(k-1) + 1)) // s + 1
            for (i, k, p, s, d) in zip(x.shape[(- 2):], self.kernel_size,
                                       self.padding, self.stride,
                                       self.dilation):
                o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) // s) + 1)
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Touch every parameter with a zero-valued term so they
                # stay in the autograd graph during training.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
    """``nn.Conv3d`` that tolerates empty (zero-element) inputs.

    Same empty-input workaround as :class:`Conv2d`, applied over the
    three trailing spatial dimensions.
    """

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
            out_shape = [x.shape[0], self.out_channels]
            # o = (i + 2p - (d*(k-1) + 1)) // s + 1 for each of D, H, W.
            for (i, k, p, s, d) in zip(x.shape[(- 3):], self.kernel_size,
                                       self.padding, self.stride,
                                       self.dilation):
                o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) // s) + 1)
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Zero-valued touch keeps parameters in the autograd graph.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` that tolerates empty inputs on torch <= 1.4."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
            out_shape = [x.shape[0], self.out_channels]
            # Transposed-conv output-size formula per spatial dim:
            # o = (i - 1)*s - 2p + (d*(k-1) + 1) + output_padding
            for (i, k, p, s, d, op) in zip(x.shape[(- 2):], self.kernel_size,
                                           self.padding, self.stride,
                                           self.dilation,
                                           self.output_padding):
                out_shape.append((((((i - 1) * s) - (2 * p)) + ((d * (k - 1)) + 1)) + op))
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Zero-valued touch keeps parameters in the autograd graph.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
    """``nn.ConvTranspose3d`` that tolerates empty inputs on torch <= 1.4."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
            out_shape = [x.shape[0], self.out_channels]
            # o = (i - 1)*s - 2p + (d*(k-1) + 1) + output_padding
            # for each of D, H, W.
            for (i, k, p, s, d, op) in zip(x.shape[(- 3):], self.kernel_size,
                                           self.padding, self.stride,
                                           self.dilation,
                                           self.output_padding):
                out_shape.append((((((i - 1) * s) - (2 * p)) + ((d * (k - 1)) + 1)) + op))
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Zero-valued touch keeps parameters in the autograd graph.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
class MaxPool2d(nn.MaxPool2d):
    """``nn.MaxPool2d`` that tolerates empty inputs on torch <= 1.9."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))):
            out_shape = list(x.shape[:2])
            # Pooling output-size formula; ceil_mode selects ceil vs floor.
            # _pair normalizes scalar hyper-parameters to 2-tuples.
            for (i, k, p, s, d) in zip(x.shape[(- 2):],
                                       _pair(self.kernel_size),
                                       _pair(self.padding),
                                       _pair(self.stride),
                                       _pair(self.dilation)):
                o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) / s) + 1)
                o = (math.ceil(o) if self.ceil_mode else math.floor(o))
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            return empty
        return super().forward(x)
class MaxPool3d(nn.MaxPool3d):
    """``nn.MaxPool3d`` that tolerates empty inputs on torch <= 1.9."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))):
            out_shape = list(x.shape[:2])
            # Same pooling output-size formula over D, H, W; _triple
            # normalizes scalar hyper-parameters to 3-tuples.
            for (i, k, p, s, d) in zip(x.shape[(- 3):],
                                       _triple(self.kernel_size),
                                       _triple(self.padding),
                                       _triple(self.stride),
                                       _triple(self.dilation)):
                o = ((((i + (2 * p)) - ((d * (k - 1)) + 1)) / s) + 1)
                o = (math.ceil(o) if self.ceil_mode else math.floor(o))
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            return empty
        return super().forward(x)
class Linear(torch.nn.Linear):
    """``nn.Linear`` that tolerates empty inputs on torch <= 1.5."""

    def forward(self, x):
        if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 5))):
            out_shape = [x.shape[0], self.out_features]
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # Zero-valued touch keeps parameters in the autograd graph.
                dummy = (sum((x.view((- 1))[0] for x in self.parameters())) * 0.0)
                return (empty + dummy)
            else:
                return empty
        return super().forward(x)
def build_model_from_cfg(cfg, registry, default_args=None):
    """Build a PyTorch model from one config dict or a list of them.

    Unlike ``build_from_cfg``, passing a list of configs produces the
    built modules wrapped in a ``Sequential``.

    Args:
        cfg (dict, list[dict]): A config dict or a list of config dicts.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the
            module. Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    return Sequential(
        *(build_from_cfg(c, registry, default_args) for c in cfg))
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """Return a bias-free 3x3 conv whose padding equals its dilation,
    so the spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus a shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, style='pytorch', with_cp=False):
        super(BasicBlock, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # Gradient checkpointing is not supported by the basic block.
        assert not with_cp

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1, channel expansion 4).

    With ``style='pytorch'`` the stride-two layer is the 3x3 conv; with
    ``style='caffe'`` it is the first 1x1 conv. ``with_cp`` trades compute
    for memory via ``torch.utils.checkpoint``.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, style='pytorch', with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        if style == 'pytorch':
            conv1_stride, conv2_stride = 1, stride
        else:
            conv1_stride, conv2_stride = stride, 1
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,
                               stride=conv1_stride, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=conv2_stride, padding=dilation,
                               dilation=dilation, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    def forward(self, x):

        def _residual_path(x):
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
            shortcut = x if self.downsample is None else self.downsample(x)
            return out + shortcut

        if self.with_cp and x.requires_grad:
            # Recompute the residual path in backward to save memory.
            out = cp.checkpoint(_residual_path, x)
        else:
            out = _residual_path(x)
        return self.relu(out)
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1,
                   style='pytorch', with_cp=False):
    """Stack ``blocks`` residual blocks into one sequential stage.

    A projection shortcut (1x1 conv + BN) is inserted on the first block
    whenever the stage changes spatial stride or channel count.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion))
    # Only the first block carries the stride and the projection.
    layers = [
        block(inplanes, planes, stride, dilation, downsample, style=style,
              with_cp=with_cp)
    ]
    inplanes = planes * block.expansion
    layers.extend(
        block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)
        for _ in range(1, blocks))
    return nn.Sequential(*layers)
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1
            means not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely,
            freeze running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=(- 1),
                 bn_eval=True,
                 bn_frozen=False,
                 with_cp=False):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        assert ((num_stages >= 1) and (num_stages <= 4))
        (block, stage_blocks) = self.arch_settings[depth]
        # Keep only the per-stage block counts actually requested.
        stage_blocks = stage_blocks[:num_stages]
        assert (len(strides) == len(dilations) == num_stages)
        assert (max(out_indices) < num_stages)
        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp
        self.inplanes = 64
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res_layers = []
        for (i, num_blocks) in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Stage i works on 64 * 2**i base planes.
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(block, self.inplanes, planes,
                                       num_blocks, stride=stride,
                                       dilation=dilation, style=self.style,
                                       with_cp=with_cp)
            self.inplanes = (planes * block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            # Remember the attribute names so forward can iterate stages.
            self.res_layers.append(layer_name)
        # Channel count of the final stage's output feature map.
        self.feat_dim = ((block.expansion * 64) *
                         (2 ** (len(stage_blocks) - 1)))

    def init_weights(self, pretrained=None):
        # Either load a checkpoint (str path) or apply default init (None).
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            # Collect the feature maps of the requested stages.
            if (i in self.out_indices):
                outs.append(x)
        # Single requested stage -> bare tensor; otherwise a tuple.
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        super(ResNet, self).train(mode)
        if self.bn_eval:
            # Put all BN layers in eval mode (freeze running stats), and
            # optionally freeze their affine parameters too.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the stem first, then each requested residual stage.
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            for i in range(1, (self.frozen_stages + 1)):
                mod = getattr(self, f'layer{i}')
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
def get_model_complexity_info(model, input_shape, print_per_layer_stat=True,
                              as_strings=True, input_constructor=None,
                              flush=False, ost=sys.stdout):
    """Compute FLOPs and parameter count of a model.

    The model is instrumented with flops-counting hooks, run once on a
    dummy (or user-constructed) input, and then restored. See
    ``get_modules_mapping`` for the layer types that are counted.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape used for calculation (without the
            batch dimension).
        print_per_layer_stat (bool): Whether to print complexity
            information for each layer in the model. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string
            form. Default: True.
        input_constructor (None | callable): If specified, a callable that
            maps ``input_shape`` to the kwargs the model is called with;
            otherwise a random tensor of that shape is used. Default: None.
        flush (bool): Same as that in :func:`print`. Default: False.
        ost (stream): Same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: FLOPs and parameter count, as strings when
        ``as_strings`` is True, otherwise as floats.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        # Delegate input creation to the user-provided constructor.
        _ = flops_model(**input_constructor(input_shape))
    else:
        try:
            # Match the dtype/device of the model's parameters.
            ref = next(flops_model.parameters())
            batch = torch.ones(()).new_empty((1, *input_shape),
                                             dtype=ref.dtype,
                                             device=ref.device)
        except StopIteration:
            # Parameter-less model: fall back to a default CPU tensor.
            batch = torch.ones(()).new_empty((1, *input_shape))
        _ = flops_model(batch)
    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count,
                               ost=ost, flush=flush)
    flops_model.stop_flops_count()
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
def flops_to_string(flops, units='GFLOPs', precision=2):
    """Convert a FLOPs count into a human-readable string.

    Note that here a multiply-add counts as one FLOP.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Converted FLOPs units. Options are None,
            'GFLOPs', 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, the
            most suitable unit is chosen automatically. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    scales = {'GFLOPs': 10.0 ** 9, 'MFLOPs': 10.0 ** 6, 'KFLOPs': 10.0 ** 3}
    if units is None:
        # Auto-select the largest unit with a non-zero integer part.
        for name, scale in scales.items():
            if flops // scale > 0:
                return str(round(flops / scale, precision)) + ' ' + name
        return str(flops) + ' FLOPs'
    if units in scales:
        return str(round(flops / scales[units], precision)) + ' ' + units
    # Unknown unit string: report raw FLOPs.
    return str(flops) + ' FLOPs'
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Converted units. Options are None, 'M', 'K'
            and ''. If set to None, the most suitable unit is chosen
            automatically. Default: None.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units == 'M':
        return str(round(num_params / (10.0 ** 6), precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / (10.0 ** 3), precision)) + ' ' + units
    if units is None:
        # Auto-select: millions, then thousands, then raw.
        if num_params // (10 ** 6) > 0:
            return str(round(num_params / (10 ** 6), precision)) + ' M'
        if num_params // (10 ** 3):
            return str(round(num_params / (10 ** 3), precision)) + ' k'
    return str(num_params)
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs',
                           precision=3, ost=sys.stdout, flush=False):
    """Print a model with FLOPs and parameter shares for each layer.

    Temporarily replaces every submodule's ``extra_repr`` with one that
    prepends the layer's absolute/relative parameter and FLOPs counts,
    prints the model, then restores the original representations.

    Args:
        model (nn.Module): The model to be printed. Should already carry
            ``__flops__``/``__params__``/``__batch_counter__`` attributes
            from a counted forward pass.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.

    Example:
        >>> model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())
        >>> print_model_with_flops(model, total_flops, total_params)
        # each line gains a prefix like
        # "0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, ..."
    """

    def accumulate_params(self):
        # Leaf (supported) modules report their cached count; containers
        # sum over their children recursively.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Normalize by the number of processed batches to get a
        # per-image figure.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Build the stats prefix and append the module's original repr.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([
            params_to_string(accumulated_num_params, units='M',
                             precision=precision),
            '{:.3%} Params'.format((accumulated_num_params / total_params)),
            flops_to_string(accumulated_flops_cost, units=units,
                            precision=precision),
            '{:.3%} FLOPs'.format((accumulated_flops_cost / total_flops)),
            self.original_extra_repr()
        ])

    def add_extra_repr(m):
        # __get__ binds the plain functions above as methods of m.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            # Keep the original so it can be restored afterwards.
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        # Undo the patching applied by add_extra_repr.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Count the trainable parameters of a model.

    Args:
        model (nn.Module): The model for parameter number calculation.

    Returns:
        int: Total number of elements over all parameters with
        ``requires_grad=True``.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def add_flops_counting_methods(net_main_module):
    """Attach flops-counting control methods to a network module.

    Binds ``start_flops_count``, ``stop_flops_count``,
    ``reset_flops_count`` and ``compute_average_flops_cost`` as bound
    methods on ``net_main_module``, then resets the counters.
    """
    for func in (start_flops_count, stop_flops_count, reset_flops_count,
                 compute_average_flops_cost):
        # __get__ turns each plain function into a method bound to the net.
        setattr(net_main_module, func.__name__,
                func.__get__(net_main_module))
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """Compute average FLOPs cost.

    Available after ``add_flops_counting_methods()`` has been called on
    the net object.

    Returns:
        tuple: (mean flops consumption per image, parameter count).
    """
    flops_sum = sum(module.__flops__ for module in self.modules()
                    if is_supported_instance(module))
    return (flops_sum / self.__batch_counter__,
            get_model_parameters_number(self))
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    Registers the batch counter plus a forward hook on every supported
    layer; call before running the network. Available after
    ``add_flops_counting_methods()`` has been applied.
    """
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        if not is_supported_instance(module):
            return
        if hasattr(module, '__flops_handle__'):
            # Hook already installed; keep the existing handle.
            return
        handle = module.register_forward_hook(
            get_modules_mapping()[type(module)])
        module.__flops_handle__ = handle

    self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    Removes all counting hooks; can be called to pause the computation at
    any time. Available after ``add_flops_counting_methods()`` has been
    applied.
    """
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """Reset all flops/batch statistics computed so far.

    Available after ``add_flops_counting_methods()`` has been applied.
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
def empty_flops_counter_hook(module, input, output):
    """Forward hook for layers whose flops are counted as zero."""
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    """Forward hook for ``nn.Upsample``: one flop per element of the
    first output slice."""
    first = output[0]
    count = first.shape[0]
    for dim in first.shape[1:]:
        count *= dim
    module.__flops__ += int(count)
def relu_flops_counter_hook(module, input, output):
    """Forward hook for activations: one flop per output element."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Forward hook for linear layers: every input element is multiplied
    against each output feature (supports extra leading dims)."""
    x = input[0]
    module.__flops__ += int(np.prod(x.shape) * output.shape[-1])
def pool_flops_counter_hook(module, input, output):
    """Forward hook for pooling layers: one flop per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
def norm_flops_counter_hook(module, input, output):
    """Forward hook for normalization layers: one flop per element, doubled
    when an affine (scale-and-shift) transform is applied."""
    x = input[0]
    batch_flops = np.prod(x.shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    if has_affine:
        batch_flops *= 2
    module.__flops__ += int(batch_flops)
def deconv_flops_counter_hook(conv_module, input, output):
    """Forward hook for ``ConvTranspose2d``: kernel work per input
    position plus optional per-output-element bias adds."""
    x = input[0]
    batch_size = x.shape[0]
    input_height, input_width = x.shape[2:]
    kernel_height, kernel_width = conv_module.kernel_size
    filters_per_channel = conv_module.out_channels // conv_module.groups
    # Every input position is multiplied against the full kernel volume.
    conv_per_position_flops = (kernel_height * kernel_width *
                               conv_module.in_channels * filters_per_channel)
    active_elements_count = batch_size * input_height * input_width
    overall_flops = conv_per_position_flops * active_elements_count
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        overall_flops += (conv_module.out_channels * batch_size *
                          output_height * output_width)
    conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook for conv layers: kernel work per output position plus
    optional per-output-element bias adds."""
    x = input[0]
    batch_size = x.shape[0]
    output_dims = list(output.shape[2:])
    kernel_dims = list(conv_module.kernel_size)
    filters_per_channel = conv_module.out_channels // conv_module.groups
    conv_per_position_flops = (int(np.prod(kernel_dims)) *
                               conv_module.in_channels * filters_per_channel)
    active_elements_count = batch_size * int(np.prod(output_dims))
    overall_flops = conv_per_position_flops * active_elements_count
    if conv_module.bias is not None:
        overall_flops += conv_module.out_channels * active_elements_count
    conv_module.__flops__ += int(overall_flops)
def batch_counter_hook(module, input, output):
    """Forward hook tallying processed samples via the first positional
    input's length; falls back to 1 (with a warning) when absent."""
    if len(input) > 0:
        batch_size = len(input[0])
    else:
        warnings.warn('No positional inputs found for a module, '
                      'assuming batch size is 1.')
        batch_size = 1
    module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
    """Initialize (or reset) the module's processed-batch counter."""
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook, at most once per module."""
    if hasattr(module, '__batch_counter_handle__'):
        # Hook is already installed; keep the existing handle.
        return
    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Remove the batch-counting forward hook, if one is installed."""
    handle = getattr(module, '__batch_counter_handle__', None)
    if handle is not None:
        handle.remove()
        del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Initialize (or reset) flops/params bookkeeping on a supported module.

    Warns if ``__flops__`` / ``__params__`` already exist on the module,
    since the counter would then clobber user-defined attributes.
    """
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops__') or hasattr(module, '__params__'):
        # Fixed: the original concatenation ran the words together
        # ("...defined for the moduleConv2d ptflops...").
        warnings.warn('variables __flops__ or __params__ are already '
                      'defined for the module ' + type(module).__name__ +
                      '; ptflops can affect your code!')
    module.__flops__ = 0
    module.__params__ = get_model_parameters_number(module)
def is_supported_instance(module):
    """Return True if flops counting supports this exact module type.

    Note the check is by exact ``type``, not ``isinstance``, so
    subclasses of supported layers are not counted.
    """
    return type(module) in get_modules_mapping()
def remove_flops_counter_hook_function(module):
    """Detach the flops-counting forward hook from a supported module."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
def get_modules_mapping():
    """Return the mapping from supported module types to their
    flops-counting forward hooks."""
    conv_types = (nn.Conv1d, nn.Conv2d, mmcv.cnn.bricks.Conv2d, nn.Conv3d,
                  mmcv.cnn.bricks.Conv3d)
    relu_types = (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)
    pool_types = (nn.MaxPool1d, nn.AvgPool1d, nn.AvgPool2d, nn.MaxPool2d,
                  mmcv.cnn.bricks.MaxPool2d, nn.MaxPool3d,
                  mmcv.cnn.bricks.MaxPool3d, nn.AvgPool3d,
                  nn.AdaptiveMaxPool1d, nn.AdaptiveAvgPool1d,
                  nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d,
                  nn.AdaptiveMaxPool3d, nn.AdaptiveAvgPool3d)
    norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                  nn.GroupNorm, nn.InstanceNorm1d, nn.InstanceNorm2d,
                  nn.InstanceNorm3d, nn.LayerNorm)
    linear_types = (nn.Linear, mmcv.cnn.bricks.Linear)
    upsample_types = (nn.Upsample,)
    deconv_types = (nn.ConvTranspose2d, mmcv.cnn.bricks.ConvTranspose2d)

    mapping = {}
    for hook, module_types in ((conv_flops_counter_hook, conv_types),
                               (relu_flops_counter_hook, relu_types),
                               (pool_flops_counter_hook, pool_types),
                               (norm_flops_counter_hook, norm_types),
                               (linear_flops_counter_hook, linear_types),
                               (upsample_flops_counter_hook, upsample_types),
                               (deconv_flops_counter_hook, deconv_types)):
        for module_type in module_types:
            mapping[module_type] = hook
    return mapping
def _fuse_conv_bn(conv, bn): 'Fuse conv and bn into one module.\n\n Args:\n conv (nn.Module): Conv to be fused.\n bn (nn.Module): BN to be fused.\n\n Returns:\n nn.Module: Fused module.\n ' conv_w = conv.weight conv_b = (conv.bias if (conv.bias is not None) else torch.zeros_like(bn.running_mean)) factor = (bn.weight / torch.sqrt((bn.running_var + bn.eps))) conv.weight = nn.Parameter((conv_w * factor.reshape([conv.out_channels, 1, 1, 1]))) conv.bias = nn.Parameter((((conv_b - bn.running_mean) * factor) + bn.bias)) return conv
def fuse_conv_bn(module):
    """Recursively fold BN layers into their preceding conv layers.

    At inference time a BN layer applies a fixed per-channel affine
    transform, so it can be absorbed into the previous conv (saving
    computation) and replaced by ``nn.Identity``.

    Args:
        module (nn.Module): Module to be fused in place.

    Returns:
        nn.Module: The fused module.
    """
    last_conv = None
    last_conv_name = None
    for name, child in module.named_children():
        if isinstance(child,
                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
            if last_conv is None:
                # BN without a preceding conv at this level: leave as-is.
                continue
            module._modules[last_conv_name] = _fuse_conv_bn(last_conv, child)
            # The BN's work is now done inside the conv; neutralize it.
            module._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            fuse_conv_bn(child)
    return module
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
    """A general BatchNorm layer without input dimension check.

    Reproduced from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc
    is `_check_input_dim` that is designed for tensor sanity checks.
    The check has been bypassed in this class for the convenience of
    converting SyncBatchNorm.
    """

    def _check_input_dim(self, input):
        # Intentionally a no-op: accept tensors of any rank so this class
        # can stand in for BatchNorm1d/2d/3d alike.
        return
def revert_sync_batchnorm(module):
    """Recursively replace `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers with
    :class:`_BatchNormXd` layers.

    Adapted from @kapily's work:
    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)

    Args:
        module (nn.Module): The module containing `SyncBatchNorm` layers.

    Returns:
        module_output: The converted module with `BatchNormXd` layers.
    """
    sync_bn_types = [torch.nn.modules.batchnorm.SyncBatchNorm]
    if hasattr(mmcv, 'ops'):
        # MMSyncBN only exists when mmcv is built with its ops extension.
        sync_bn_types.append(mmcv.ops.SyncBatchNorm)
    module_output = module
    if isinstance(module, tuple(sync_bn_types)):
        module_output = _BatchNormXd(module.num_features, module.eps,
                                     module.momentum, module.affine,
                                     module.track_running_stats)
        if module.affine:
            # Copy affine parameters without recording the copy in autograd.
            with torch.no_grad():
                module_output.weight = module.weight
                module_output.bias = module.bias
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
        module_output.training = module.training
        if hasattr(module, 'qconfig'):
            module_output.qconfig = module.qconfig
    for name, child in module.named_children():
        module_output.add_module(name, revert_sync_batchnorm(child))
    del module
    return module_output
def update_init_info(module, init_info):
    """Update the `_params_init_info` in the module if the value of parameters
    are changed.

    Args:
        module (obj:`nn.Module`): The module of PyTorch with a user-defined
            attribute `_params_init_info` which records the initialization
            information.
        init_info (str): The string that describes the initialization.
    """
    assert hasattr(module, '_params_init_info'), \
        f'Can not find `_params_init_info` in {module}'
    records = module._params_init_info
    for name, param in module.named_parameters():
        assert param in records, (
            f'Find a new :obj:`Parameter` named `{name}` during executing the '
            f'`init_weights` of `{module.__class__.__name__}`. Please do not '
            'add or replace parameters during executing the `init_weights`. ')
        # A changed mean indicates the parameter was (re-)initialized since
        # the last snapshot, so refresh its recorded init description.
        current_mean = param.data.mean()
        if records[param]['tmp_mean_value'] != current_mean:
            records[param]['init_info'] = init_info
            records[param]['tmp_mean_value'] = current_mean
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Modules lacking either attribute (or with it set to None) are left
    untouched for that attribute.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight``; constant-fill ``module.bias``.

    Args:
        module (nn.Module): module whose ``weight``/``bias`` are initialized.
        gain (int | float): scaling factor for the Xavier scheme.
        bias (int | float): constant value for the bias.
        distribution (str): ``'uniform'`` or ``'normal'``.
    """
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        init_fn = (nn.init.xavier_uniform_
                   if distribution == 'uniform' else nn.init.xavier_normal_)
        init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def normal_init(module, mean=0, std=1, bias=0):
    """Fill ``module.weight`` from N(mean, std**2); constant-fill the bias."""
    if getattr(module, 'weight', None) is not None:
        nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def trunc_normal_init(module: nn.Module,
                      mean: float = 0,
                      std: float = 1,
                      a: float = -2,
                      b: float = 2,
                      bias: float = 0) -> None:
    """Truncated-normal init for ``module.weight`` (values outside [a, b]
    are resampled); constant fill for ``module.bias``."""
    if getattr(module, 'weight', None) is not None:
        trunc_normal_(module.weight, mean, std, a, b)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def uniform_init(module, a=0, b=1, bias=0):
    """Fill ``module.weight`` from U(a, b); constant-fill ``module.bias``."""
    if getattr(module, 'weight', None) is not None:
        nn.init.uniform_(module.weight, a, b)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize ``module.weight``; constant-fill ``module.bias``.

    Args:
        module (nn.Module): module whose ``weight``/``bias`` are initialized.
        a (int | float): negative slope passed to the Kaiming scheme.
        mode (str): ``'fan_in'`` or ``'fan_out'``.
        nonlinearity (str): name of the following nonlinearity.
        bias (int | float): constant value for the bias.
        distribution (str): ``'uniform'`` or ``'normal'``.
    """
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        kaiming_fn = (nn.init.kaiming_uniform_
                      if distribution == 'uniform' else
                      nn.init.kaiming_normal_)
        kaiming_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def caffe2_xavier_init(module, bias=0):
    """Initialize ``module`` the way Caffe2's ``XavierFill`` does.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``a=1``, ``mode='fan_in'`` and a leaky_relu nonlinearity.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
def bias_init_with_prob(prior_prob):
    """initialize conv/fc bias value according to a given probability value."""
    # Inverse sigmoid: with this bias, sigmoid(bias) == prior_prob.
    return float(-np.log((1 - prior_prob) / prior_prob))
def _get_bases_name(m): return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
    """Base class for weight initializers.

    Validates and stores the options shared by all initializers.

    Args:
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability used to derive the bias
            value; overrides ``bias`` when given. Defaults to None.
        layer (str | list[str], optional): the layer(s) to be initialized.
            Defaults to None (no layer filter).
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')
        if bias_prob is not None and not isinstance(bias_prob, float):
            raise TypeError(
                f'bias_prob type must be float, but got {type(bias_prob)}')
        if layer is None:
            layer = []
        elif not isinstance(layer, (str, list)):
            raise TypeError(
                f'layer must be a str or a list of str, but got a {type(layer)}'
            )
        # A probability, when given, takes precedence over the raw bias.
        if bias_prob is not None:
            self.bias = bias_init_with_prob(bias_prob)
        else:
            self.bias = bias
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        return f'{self.__class__.__name__}, bias={self.bias}'
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with constant values.

    Args:
        val (int | float): the value to fill the weights in the module with
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                constant_init(m, self.val, self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = set([m.__class__.__name__] + _get_bases_name(m))
            if set(self.layer) & candidates:
                constant_init(m, self.val, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    """Initialize module parameters with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks - Glorot, X. & Bengio, Y. (2010).
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_

    Args:
        gain (int | float): an optional scaling factor. Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): distribution either be ``'normal'``
            or ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                xavier_init(m, self.gain, self.bias, self.distribution)
                return
            # Match either the class name itself or any direct base name.
            candidates = set([m.__class__.__name__] + _get_bases_name(m))
            if set(self.layer) & candidates:
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    """Initialize module parameters with the values drawn from the normal
    distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.

    Args:
        mean (int | float):the mean of the normal distribution. Defaults to 0.
        std (int | float): the standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def init(m):
            if self.wholemodule:
                normal_init(m, self.mean, self.std, self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = set([m.__class__.__name__] + _get_bases_name(m))
            if set(self.layer) & candidates:
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean}, '
                f'std={self.std}, bias={self.bias}')
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    """Initialize module parameters with the values drawn from the normal
    distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values
    outside :math:`[a, b]`.

    Args:
        mean (float): the mean of the normal distribution. Defaults to 0.
        std (float): the standard deviation of the normal distribution.
            Defaults to 1.
        a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
        bias (float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def init(m):
            if self.wholemodule:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)
                return
            # Match either the class name itself or any direct base name.
            candidates = set([m.__class__.__name__] + _get_bases_name(m))
            if set(self.layer) & candidates:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b}, '
                f'mean={self.mean}, std={self.std}, bias={self.bias}')