from typing import Dict, List, Optional, Union

import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm

from mmaction.registry import MODELS


def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.

    This function rounds the channel number to the nearest value that is
    divisible by the divisor, while ensuring the result does not fall below
    ``min_ratio`` of the original value.

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int, optional): The minimum value of the output channel.
            Defaults to None, which means the minimum value equals the
            divisor.
        min_ratio (float, optional): The minimum ratio of the rounded channel
            number to the original channel number. Defaults to 0.9.

    Returns:
        int: The modified output channel number.
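
    Example:
        A quick doctest-style sketch of the rounding behaviour (the inputs
        are illustrative, not taken from any real config):

        >>> make_divisible(32 * 0.5, 8)
        16
        >>> make_divisible(int(32 * 0.1), 8)  # clamped up to the divisor
        8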
    """
    if min_value is None:
        min_value = divisor
    # Round to the nearest multiple of ``divisor``.
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not shrink the channel number by
    # more than (1 - min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value


class InvertedResidual(nn.Module):
    """InvertedResidual block for MobileNetV2.

    Args:
        in_channels (int): The input channels of the InvertedResidual block.
        out_channels (int): The output channels of the InvertedResidual
            block.
        stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Adjusts the number of channels of the hidden
            layer in InvertedResidual by this amount.
        conv_cfg (dict): Config dict for convolution layer.
            Defaults to None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to ``dict(type='BN')``.
        act_cfg (dict): Config dict for activation layer.
            Defaults to ``dict(type='ReLU6')``.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Defaults to False.
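
    Example:
        A minimal shape-check sketch; the input sizes are illustrative
        assumptions, not values from any real config:

        >>> import torch
        >>> block = InvertedResidual(16, 24, stride=2, expand_ratio=6)
        >>> block(torch.randn(1, 16, 56, 56)).shape
        torch.Size([1, 24, 28, 28])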
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 with_cp=False):
        super().__init__()
        self.stride = stride
        assert stride in [1, 2], \
            f'stride must be in [1, 2], but received {stride}.'
        self.with_cp = with_cp
        self.use_res_connect = self.stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion.
            layers.append(
                ConvModule(
                    in_channels=in_channels,
                    out_channels=hidden_dim,
                    kernel_size=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        layers.extend([
            # 3x3 depthwise convolution (groups == channels).
            ConvModule(
                in_channels=hidden_dim,
                out_channels=hidden_dim,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=hidden_dim,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            # 1x1 linear projection (no activation).
            ConvModule(
                in_channels=hidden_dim,
                out_channels=out_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None)
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Defines the computation performed at every call.

        Args:
            x (Tensor): The input data.

        Returns:
            Tensor: The output of the module.
        """

        def _inner_forward(x):
            if self.use_res_connect:
                return x + self.conv(x)
            return self.conv(x)

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out


@MODELS.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.

    Args:
        pretrained (str | None): Name of pretrained model. Defaults to None.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to ``(7, )``.
        frozen_stages (int): Stages to be frozen (all param fixed). Note that
            the last stage in ``MobileNetV2`` is ``conv2``. Defaults to -1,
            which means not freezing any parameters.
        conv_cfg (dict): Config dict for convolution layer.
            Defaults to ``dict(type='Conv')``.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to ``dict(type='BN2d', requires_grad=True)``.
        act_cfg (dict): Config dict for activation layer.
            Defaults to ``dict(type='ReLU6', inplace=True)``.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Defaults to False.
        init_cfg (dict or list[dict]): Initialization config dict. Defaults
            to ``[
                dict(type='Kaiming', layer='Conv2d'),
                dict(type='Constant', layer=['GroupNorm', '_BatchNorm'],
                     val=1.)
            ]``.
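
    Example:
        A minimal shape-check sketch (the input resolution is an
        illustrative assumption):

        >>> import torch
        >>> model = MobileNetV2(out_indices=(4, 7))
        >>> feats = model(torch.randn(1, 3, 224, 224))
        >>> [f.shape for f in feats]
        [torch.Size([1, 96, 14, 14]), torch.Size([1, 1280, 7, 7])]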
    """

    # Each entry is (expand_ratio, channel, num_blocks, stride) for a stage.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 pretrained=None,
                 widen_factor=1.,
                 out_indices=(7, ),
                 frozen_stages=-1,
                 conv_cfg=dict(type='Conv'),
                 norm_cfg=dict(type='BN2d', requires_grad=True),
                 act_cfg=dict(type='ReLU6', inplace=True),
                 norm_eval=False,
                 with_cp=False,
                 init_cfg: Optional[Union[Dict, List[Dict]]] = [
                     dict(type='Kaiming', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=['GroupNorm', '_BatchNorm'],
                         val=1.)
                 ]):
        if pretrained is not None:
            init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        super().__init__(init_cfg=init_cfg)
        self.pretrained = pretrained
        self.widen_factor = widen_factor
        for index in out_indices:
            if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 8), but received {index}')
        if frozen_stages not in range(-1, 9):
            raise ValueError('frozen_stages must be in range(-1, 9), '
                             f'but received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        self.in_channels = make_divisible(32 * widen_factor, 8)

        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        self.layers = []

        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280

        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): Number of blocks.
            stride (int): Stride of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.

        Returns:
            nn.Sequential: The stacked InvertedResidual blocks.
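
        Example:
            An illustrative sketch (the channel numbers are assumptions,
            not values from a real config):

            >>> model = MobileNetV2()
            >>> model.in_channels = 16  # pretend we are at a stage input
            >>> stage = model.make_layer(
            ...     out_channels=24, num_blocks=2, stride=2, expand_ratio=6)
            >>> len(stage)
            2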
        """
        layers = []
        for i in range(num_blocks):
            # Only the first block in a stage may have stride > 1.
            if i >= 1:
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride,
                    expand_ratio=expand_ratio,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels

        return nn.Sequential(*layers)

    def forward(self, x):
        """Defines the computation performed at every call.

        Args:
            x (Tensor): The input data.

        Returns:
            Tensor or Tuple[Tensor]: The feature of the input samples
            extracted by the backbone.
        """
        x = self.conv1(x)

        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)

        if len(outs) == 1:
            return outs[0]

        return tuple(outs)

    def _freeze_stages(self):
        """Prevent all the parameters from being optimized before
        ``self.frozen_stages``."""
        if self.frozen_stages >= 0:
            self.conv1.eval()
            for param in self.conv1.parameters():
                param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            layer_name = self.layers[i - 1]
            layer = getattr(self, layer_name)
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Set the optimization status when training."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()