from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant
from paddle import ParamAttr
from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Linear
from paddle.regularizer import L2Decay
from paddle.nn.initializer import KaimingNormal, XavierNormal
from ppdet.core.workspace import register

__all__ = ['PPLCNetEmbedding']

# Each NET_CONFIG entry is [k, in_c, out_c, s, use_se]:
#   k: kernel size of the depthwise conv
#   in_c: number of input channels of the block
#   out_c: number of output channels of the block
#   s: stride of the depthwise conv
#   use_se: whether to append an SE module to the block
NET_CONFIG = {
    "blocks2": [[3, 16, 32, 1, False]],
    "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]],
    "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]],
    "blocks5": [[3, 128, 256, 2, False], [5, 256, 256, 1, False],
                [5, 256, 256, 1, False], [5, 256, 256, 1, False],
                [5, 256, 256, 1, False], [5, 256, 256, 1, False]],
    "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True]]
}

def make_divisible(v, divisor=8, min_value=None):
    # Round the channel number v to the nearest multiple of `divisor`,
    # but never drop more than 10% below the original value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
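# A few hand-checked examples of make_divisible (illustrative only):
#   make_divisible(16 * 2.5) -> 40   (40 is already a multiple of 8)
#   make_divisible(36)       -> 40   (rounded to the nearest multiple of 8)
#   make_divisible(10)       -> 16   (8 would fall below 0.9 * 10, so one divisor is added back)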
class ConvBNLayer(nn.Layer):
    # Conv2D (no bias) + BatchNorm2D + HardSwish, used by every PP-LCNet block.
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 num_groups=1):
        super().__init__()

        self.conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            weight_attr=ParamAttr(initializer=KaimingNormal()),
            bias_attr=False)

        self.bn = BatchNorm2D(
            num_filters,
            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
        self.hardswish = nn.Hardswish()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.hardswish(x)
        return x

class DepthwiseSeparable(nn.Layer):
    # Depthwise conv + (optional SE) + pointwise conv: the basic PP-LCNet block.
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 dw_size=3,
                 use_se=False):
        super().__init__()
        self.use_se = use_se
        self.dw_conv = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_channels,
            filter_size=dw_size,
            stride=stride,
            num_groups=num_channels)
        if use_se:
            self.se = SEModule(num_channels)
        self.pw_conv = ConvBNLayer(
            num_channels=num_channels,
            filter_size=1,
            num_filters=num_filters,
            stride=1)

    def forward(self, x):
        x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x)
        return x
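# For illustration: with scale=1.0, the NET_CONFIG row [5, 256, 512, 2, True] in
# "blocks6" becomes DepthwiseSeparable(num_channels=256, num_filters=512,
# stride=2, dw_size=5, use_se=True) inside PPLCNet below.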
class SEModule(nn.Layer):
    # Squeeze-and-Excitation: global pooling followed by two 1x1 convs that
    # produce per-channel weights used to rescale the input feature map.
    def __init__(self, channel, reduction=4):
        super().__init__()
        self.avg_pool = AdaptiveAvgPool2D(1)
        self.conv1 = Conv2D(
            in_channels=channel,
            out_channels=channel // reduction,
            kernel_size=1,
            stride=1,
            padding=0)
        self.relu = nn.ReLU()
        self.conv2 = Conv2D(
            in_channels=channel // reduction,
            out_channels=channel,
            kernel_size=1,
            stride=1,
            padding=0)
        self.hardsigmoid = nn.Hardsigmoid()

    def forward(self, x):
        identity = x
        x = self.avg_pool(x)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.hardsigmoid(x)
        x = paddle.multiply(x=identity, y=x)
        return x

class PPLCNet(nn.Layer):
    """
    PP-LCNet, see https://arxiv.org/abs/2109.15099.
    This implementation differs from the PPLCNet in
    ppdet/modeling/backbones/lcnet.py and in PaddleClas because its output
    is the flattened feature of last_conv instead of classification logits.

    Args:
        scale (float): Scale ratio of channels.
        class_expand (int): Number of output channels of last_conv.
    """

    def __init__(self, scale=1.0, class_expand=1280):
        super(PPLCNet, self).__init__()
        self.scale = scale
        self.class_expand = class_expand

        self.conv1 = ConvBNLayer(
            num_channels=3,
            filter_size=3,
            num_filters=make_divisible(16 * scale),
            stride=2)

        self.blocks2 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
        ])

        self.blocks3 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"])
        ])

        self.blocks4 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"])
        ])

        self.blocks5 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"])
        ])

        self.blocks6 = nn.Sequential(*[
            DepthwiseSeparable(
                num_channels=make_divisible(in_c * scale),
                num_filters=make_divisible(out_c * scale),
                dw_size=k,
                stride=s,
                use_se=se)
            for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"])
        ])

        self.avg_pool = AdaptiveAvgPool2D(1)
        self.last_conv = Conv2D(
            in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
            out_channels=self.class_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            bias_attr=False)
        self.hardswish = nn.Hardswish()
        self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)

    def forward(self, x):
        x = self.conv1(x)

        x = self.blocks2(x)
        x = self.blocks3(x)
        x = self.blocks4(x)
        x = self.blocks5(x)
        x = self.blocks6(x)

        x = self.avg_pool(x)
        x = self.last_conv(x)
        x = self.hardswish(x)
        x = self.flatten(x)
        return x
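# Shape walk-through (illustrative, for a [N, 3, 224, 224] input at any scale):
# conv1 halves the resolution to 112x112; blocks3/4/5/6 each halve it again
# (112 -> 56 -> 28 -> 14 -> 7); avg_pool reduces it to 1x1; last_conv expands
# the channels to class_expand, so forward() returns a [N, class_expand] tensor
# ([N, 1280] by default).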
class FC(nn.Layer):
    # Fully-connected neck that projects the backbone feature to the embedding size.
    def __init__(self, input_ch, output_ch):
        super(FC, self).__init__()
        weight_attr = ParamAttr(initializer=XavierNormal())
        self.fc = paddle.nn.Linear(input_ch, output_ch, weight_attr=weight_attr)

    def forward(self, x):
        out = self.fc(x)
        return out

@register
class PPLCNetEmbedding(nn.Layer):
    """
    PPLCNet embedding model: a PPLCNet backbone followed by an FC neck.

    Args:
        scale (float): Scale ratio of channels of the PPLCNet backbone.
        input_ch (int): Dimension of the flattened backbone feature
            (equals the backbone's class_expand).
        output_ch (int): Dimension of the output embedding.
    """
    def __init__(self, scale=2.5, input_ch=1280, output_ch=512):
        super(PPLCNetEmbedding, self).__init__()
        self.backbone = PPLCNet(scale=scale)
        self.neck = FC(input_ch, output_ch)

    def forward(self, x):
        feat = self.backbone(x)
        feat_out = self.neck(feat)
        return feat_out
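# Minimal usage sketch (assumption: the layer is constructed directly instead of
# through the ppdet @register/config machinery; the input size is arbitrary).
if __name__ == "__main__":
    model = PPLCNetEmbedding(scale=2.5, input_ch=1280, output_ch=512)
    images = paddle.rand([2, 3, 192, 64])  # e.g. two cropped pedestrian patches
    embeddings = model(images)
    print(embeddings.shape)  # [2, 512]: one 512-d embedding per input image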