# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Union

import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm

from mmaction.registry import MODELS


def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.



    This function rounds the channel number down to the nearest value that can

    be divisible by the divisor.

    Args:

        value (int): The original channel number.

        divisor (int): The divisor to fully divide the channel number.

        min_value (int, optional): The minimum value of the output channel.

            Defaults to None, means that the minimum value equal to the

            divisor.

        min_ratio (float, optional): The minimum ratio of the rounded channel

            number to the original channel number. Defaults to 0.9.

    Returns:

        int: The modified output channel number

    """

    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not reduce the channel number by more
    # than (1 - min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value
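

# A quick sanity sketch of ``make_divisible`` (an editorial example, not part
# of the upstream module):
#
#     make_divisible(30, 8)   # -> 32, the nearest multiple of 8
#     make_divisible(25, 8)   # -> 24, rounding can also go down
#     make_divisible(20, 16)  # -> 32: the nearest multiple 16 falls below
#                             #    min_ratio * 20 = 18, so one divisor is
#                             #    added back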


class InvertedResidual(nn.Module):
    """InvertedResidual block for MobileNetV2.



    Args:

        in_channels (int): The input channels of the InvertedResidual block.

        out_channels (int): The output channels of the InvertedResidual block.

        stride (int): Stride of the middle (first) 3x3 convolution.

        expand_ratio (int): adjusts number of channels of the hidden layer

            in InvertedResidual by this amount.

        conv_cfg (dict): Config dict for convolution layer.

            Defaults to None, which means using conv2d.

        norm_cfg (dict): Config dict for normalization layer.

            Defaults to dict(type='BN').

        act_cfg (dict): Config dict for activation layer.

            Defaults to dict(type='ReLU6').

        with_cp (bool): Use checkpoint or not. Using checkpoint will save some

            memory while slowing down the training speed. Defaults to False.

    Returns:

        Tensor: The output tensor

    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 with_cp=False):
        super().__init__()
        self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2], ' \
            f'but received {stride}.'
        self.with_cp = with_cp
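        # The identity shortcut is only used when the block preserves both the
        # spatial resolution (stride == 1) and the channel count.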
        self.use_res_connect = self.stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))

        layers = []
        if expand_ratio != 1:
            layers.append(
                ConvModule(
                    in_channels=in_channels,
                    out_channels=hidden_dim,
                    kernel_size=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        layers.extend([
            ConvModule(
                in_channels=hidden_dim,
                out_channels=hidden_dim,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=hidden_dim,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            ConvModule(
                in_channels=hidden_dim,
                out_channels=out_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None)
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Defines the computation performed at every call.



        Args:

            x (Tensor): The input data.



        Returns:

            Tensor: The output of the module.

        """

        def _inner_forward(x):
            if self.use_res_connect:
                return x + self.conv(x)

            return self.conv(x)

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out
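

# An editorial usage sketch for ``InvertedResidual`` (illustrative, not part
# of the upstream module):
#
#     import torch
#     block = InvertedResidual(32, 32, stride=1, expand_ratio=6)
#     x = torch.randn(1, 32, 56, 56)
#     y = block(x)  # stride == 1 and in_channels == out_channels, so the
#                   # residual shortcut applies and y.shape == x.shape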


@MODELS.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.



    Args:

        pretrained (str | None): Name of pretrained model. Defaults to None.

        widen_factor (float): Width multiplier, multiply number of

            channels in each layer by this amount. Defaults to 1.0.

        out_indices (None or Sequence[int]): Output from which stages.

            Defaults to (7, ).

        frozen_stages (int): Stages to be frozen (all param fixed). Note that

            the last stage in ``MobileNetV2`` is ``conv2``. Defaults to -1,

            which means not freezing any parameters.

        conv_cfg (dict): Config dict for convolution layer.

            Defaults to None, which means using conv2d.

        norm_cfg (dict): Config dict for normalization layer.

            Defaults to dict(type='BN').

        act_cfg (dict): Config dict for activation layer.

            Defaults to dict(type='ReLU6').

        norm_eval (bool): Whether to set norm layers to eval mode, namely,

            freeze running stats (mean and var). Note: Effect on Batch Norm

            and its variants only. Defaults to False.

        with_cp (bool): Use checkpoint or not. Using checkpoint will save some

            memory while slowing down the training speed. Defaults to False.

        init_cfg (dict or list[dict]): Initialization config dict. Defaults to

            ``[

            dict(type='Kaiming', layer='Conv2d',),

            dict(type='Constant', layer=['GroupNorm', '_BatchNorm'], val=1.)

            ]``.

    """

    # Parameters to build layers. 4 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks, stride.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]
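    # For example, the row [6, 24, 2, 2] builds a stage of two
    # InvertedResidual blocks with expand_ratio=6 and 24 output channels,
    # where only the first block uses stride 2 (see ``make_layer``).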

    def __init__(self,
                 pretrained=None,
                 widen_factor=1.,
                 out_indices=(7, ),
                 frozen_stages=-1,
                 conv_cfg=dict(type='Conv'),
                 norm_cfg=dict(type='BN2d', requires_grad=True),
                 act_cfg=dict(type='ReLU6', inplace=True),
                 norm_eval=False,
                 with_cp=False,
                 init_cfg: Optional[Union[Dict, List[Dict]]] = [
                     dict(type='Kaiming', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=['GroupNorm', '_BatchNorm'],
                         val=1.)
                 ]):
        if pretrained is not None:
            init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        super().__init__(init_cfg=init_cfg)
        self.pretrained = pretrained
        self.widen_factor = widen_factor
        self.out_indices = out_indices
        for index in out_indices:
            if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 8). But received {index}')

        if frozen_stages not in range(-1, 9):
            raise ValueError('frozen_stages must be in range(-1, 9). '
                             f'But received {frozen_stages}')
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        self.in_channels = make_divisible(32 * widen_factor, 8)

        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        self.layers = []

        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280

        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.



        Args:

            out_channels (int): out_channels of block.

            num_blocks (int): number of blocks.

            stride (int): stride of the first block. Defaults to 1

            expand_ratio (int): Expand the number of channels of the

                hidden layer in InvertedResidual by this ratio. Defaults to 6.

        """
        layers = []
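        # Only the first block in a stage applies the requested stride; the
        # remaining blocks run at stride 1, following the original
        # MobileNetV2 design.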
        for i in range(num_blocks):
            if i >= 1:
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride,
                    expand_ratio=expand_ratio,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels

        return nn.Sequential(*layers)

    def forward(self, x):
        """Defines the computation performed at every call.



        Args:

            x (Tensor): The input data.



        Returns:

            Tensor or Tuple[Tensor]: The feature of the input samples extracted

            by the backbone.

        """
        x = self.conv1(x)

        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)

        if len(outs) == 1:
            return outs[0]

        return tuple(outs)

    def _freeze_stages(self):
        """Prevent all the parameters from being optimized before

        ``self.frozen_stages``."""
        if self.frozen_stages >= 0:
            self.conv1.eval()
            for param in self.conv1.parameters():
                param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            layer_name = self.layers[i - 1]
            layer = getattr(self, layer_name)
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Set the optimization status when training."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
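

# An editorial smoke test (a minimal sketch; it assumes torch, mmcv, mmengine
# and mmaction2 are installed so the imports above resolve, and it is not part
# of the upstream module). With out_indices=(4, 7) the backbone returns the
# 96-channel feature map of stage 5 and the 1280-channel output of conv2.
if __name__ == '__main__':
    import torch

    model = MobileNetV2(out_indices=(4, 7))
    model.init_weights()
    model.eval()

    inputs = torch.rand(1, 3, 224, 224)
    outs = model(inputs)
    for out in outs:
        print(out.shape)
    # Expected:
    # torch.Size([1, 96, 14, 14])
    # torch.Size([1, 1280, 7, 7])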