repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/fishnet.py
|
"""
FishNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, Identity, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
groups):
"""
Channel squeeze operation.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns:
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
channels_per_group = channels // groups
x = x.view(batch, channels_per_group, groups, height, width).sum(dim=2)
return x
class ChannelSqueeze(nn.Module):
    """
    Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelSqueeze, self).__init__()
        # The channel count must split evenly into the requested groups.
        if channels % groups != 0:
            raise ValueError("channels must be divisible by groups")
        self.groups = groups

    def forward(self, x):
        return channel_squeeze(x, self.groups)
class PreSEAttBlock(nn.Module):
    """
    FishNet specific Squeeze-and-Excitation attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 16
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=16):
        super(PreSEAttBlock, self).__init__()
        # Bottleneck width of the excitation MLP.
        mid_channels = out_channels // reduction

        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=True)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Pre-activation (BN + ReLU) before the global squeeze.
        w = self.pool(self.relu(self.bn(x)))
        w = self.relu(self.conv1(w))
        # Per-channel attention weights in (0, 1).
        return self.sigmoid(self.conv2(w))
class FishBottleneck(nn.Module):
    """
    FishNet bottleneck block for residual unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilation):
        super(FishBottleneck, self).__init__()
        # Standard 4x bottleneck compression.
        mid_channels = out_channels // 4

        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            padding=dilation,
            dilation=dilation)
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        # 1x1 reduce -> 3x3 (possibly dilated/strided) -> 1x1 expand.
        return self.conv3(self.conv2(self.conv1(x)))
class FishBlock(nn.Module):
    """
    FishNet block: bottleneck residual body plus one of three identity branches
    (channel squeeze, 1x1 projection, or plain passthrough).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    squeeze : bool, default False
        Whether to use a channel squeeze operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 dilation=1,
                 squeeze=False):
        super(FishBlock, self).__init__()
        self.squeeze = squeeze
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.body = FishBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            dilation=dilation)
        if squeeze:
            # The squeeze path halves the channel count, so widths must match 2:1.
            assert (in_channels // 2 == out_channels)
            self.c_squeeze = ChannelSqueeze(
                channels=in_channels,
                groups=2)
        elif self.resize_identity:
            self.identity_conv = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)

    def forward(self, x):
        # Select the matching identity branch for the residual sum.
        if self.squeeze:
            shortcut = self.c_squeeze(x)
        elif self.resize_identity:
            shortcut = self.identity_conv(x)
        else:
            shortcut = x
        return self.body(x) + shortcut
class DownUnit(nn.Module):
    """
    FishNet down unit: a chain of FishBlocks followed by 2x max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(DownUnit, self).__init__()
        self.blocks = nn.Sequential()
        for idx, channels in enumerate(out_channels_list, start=1):
            self.blocks.add_module("block{}".format(idx), FishBlock(
                in_channels=in_channels,
                out_channels=channels))
            in_channels = channels
        self.pool = nn.MaxPool2d(
            kernel_size=2,
            stride=2)

    def forward(self, x):
        return self.pool(self.blocks(x))
class UpUnit(nn.Module):
    """
    FishNet up unit: a chain of FishBlocks followed by 2x nearest upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 dilation=1):
        super(UpUnit, self).__init__()
        self.blocks = nn.Sequential()
        for idx, channels in enumerate(out_channels_list, start=1):
            # Only the first block of a dilated unit squeezes channels.
            do_squeeze = (idx == 1) and (dilation > 1)
            self.blocks.add_module("block{}".format(idx), FishBlock(
                in_channels=in_channels,
                out_channels=channels,
                dilation=dilation,
                squeeze=do_squeeze))
            in_channels = channels
        self.upsample = InterpolationBlock(scale_factor=2, mode="nearest", align_corners=None)

    def forward(self, x):
        return self.upsample(self.blocks(x))
class SkipUnit(nn.Module):
    """
    FishNet skip connection unit: a plain chain of FishBlocks, no resampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(SkipUnit, self).__init__()
        self.blocks = nn.Sequential()
        for idx, channels in enumerate(out_channels_list, start=1):
            self.blocks.add_module("block{}".format(idx), FishBlock(
                in_channels=in_channels,
                out_channels=channels))
            in_channels = channels

    def forward(self, x):
        return self.blocks(x)
class SkipAttUnit(nn.Module):
    """
    FishNet skip connection unit with attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(SkipAttUnit, self).__init__()
        mid_channels1 = in_channels // 2
        mid_channels2 = 2 * in_channels

        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels1)
        self.conv2 = pre_conv1x1_block(
            in_channels=mid_channels1,
            out_channels=mid_channels2,
            bias=True)
        # SE-style attention computed on the widened feature map.
        self.se = PreSEAttBlock(
            in_channels=mid_channels2,
            out_channels=out_channels_list[-1])

        self.blocks = nn.Sequential()
        in_channels = mid_channels2
        for idx, channels in enumerate(out_channels_list, start=1):
            self.blocks.add_module("block{}".format(idx), FishBlock(
                in_channels=in_channels,
                out_channels=channels))
            in_channels = channels

    def forward(self, x):
        x = self.conv2(self.conv1(x))
        att = self.se(x)
        x = self.blocks(x)
        # Scale-and-shift by the attention weights.
        return x * att + att
class FishFinalBlock(nn.Module):
    """
    FishNet final block: 1x1 channel halving followed by pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(FishFinalBlock, self).__init__()
        mid_channels = in_channels // 2

        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.preactiv = PreResActivation(
            in_channels=mid_channels)

    def forward(self, x):
        return self.preactiv(self.conv1(x))
class FishNet(nn.Module):
    """
    FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    The backbone is assembled from three passes wired into a `SesquialteralHourglass`:
    a downsampling pass (`down1_seq`), an upsampling pass (`up_seq`), and a second
    downsampling pass (`down2_seq`), with skip units bridging the passes.

    Parameters:
    ----------
    direct_channels : list of list of list of int
        Number of output channels for each unit along the straight path.
    skip_channels : list of list of list of int
        Number of output channels for each skip connection unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 direct_channels,
                 skip_channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(FishNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        # Number of units per pass; all three direct passes share this length.
        depth = len(direct_channels[0])
        down1_channels = direct_channels[0]
        up_channels = direct_channels[1]
        down2_channels = direct_channels[2]
        skip1_channels = skip_channels[0]
        skip2_channels = skip_channels[1]

        self.features = nn.Sequential()
        self.features.add_module("init_block", SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels

        # First downsampling pass plus its skip units; the last skip slot
        # (i == depth) is the attention unit at the deepest resolution.
        down1_seq = nn.Sequential()
        skip1_seq = nn.Sequential()
        for i in range(depth + 1):
            skip1_channels_list = skip1_channels[i]
            if i < depth:
                skip1_seq.add_module("unit{}".format(i + 1), SkipUnit(
                    in_channels=in_channels,
                    out_channels_list=skip1_channels_list))
                down1_channels_list = down1_channels[i]
                down1_seq.add_module("unit{}".format(i + 1), DownUnit(
                    in_channels=in_channels,
                    out_channels_list=down1_channels_list))
                in_channels = down1_channels_list[-1]
            else:
                skip1_seq.add_module("unit{}".format(i + 1), SkipAttUnit(
                    in_channels=in_channels,
                    out_channels_list=skip1_channels_list))
                in_channels = skip1_channels_list[-1]

        # Upsampling pass; each step concatenates the matching skip1 output,
        # hence the in_channels increment before building the unit.
        up_seq = nn.Sequential()
        skip2_seq = nn.Sequential()
        for i in range(depth + 1):
            skip2_channels_list = skip2_channels[i]
            if i > 0:
                in_channels += skip1_channels[depth - i][-1]
            if i < depth:
                skip2_seq.add_module("unit{}".format(i + 1), SkipUnit(
                    in_channels=in_channels,
                    out_channels_list=skip2_channels_list))
                up_channels_list = up_channels[i]
                # Dilation doubles with each upsampling step.
                dilation = 2 ** i
                up_seq.add_module("unit{}".format(i + 1), UpUnit(
                    in_channels=in_channels,
                    out_channels_list=up_channels_list,
                    dilation=dilation))
                in_channels = up_channels_list[-1]
            else:
                skip2_seq.add_module("unit{}".format(i + 1), Identity())

        # Second downsampling pass; each step concatenates a skip2 output.
        down2_seq = nn.Sequential()
        for i in range(depth):
            down2_channels_list = down2_channels[i]
            down2_seq.add_module("unit{}".format(i + 1), DownUnit(
                in_channels=in_channels,
                out_channels_list=down2_channels_list))
            in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]

        self.features.add_module("hg", SesquialteralHourglass(
            down1_seq=down1_seq,
            skip1_seq=skip1_seq,
            up_seq=up_seq,
            skip2_seq=skip2_seq,
            down2_seq=down2_seq))
        self.features.add_module("final_block", FishFinalBlock(in_channels=in_channels))
        # FishFinalBlock halves the channel count.
        in_channels = in_channels // 2
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        # Classification head: 1x1 conv instead of a Linear layer.
        self.output = nn.Sequential()
        self.output.add_module("final_conv", conv1x1(
            in_channels=in_channels,
            out_channels=num_classes,
            bias=True))

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        # Flatten the 1x1 spatial map into class logits.
        x = x.view(x.size(0), -1)
        return x
def get_fishnet(blocks,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create FishNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (99 or 150).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    FishNet
        The constructed network.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is set without `model_name`.
    """
    if blocks == 99:
        direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
        skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
    elif blocks == 150:
        direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
        skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
    else:
        raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))

    direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
    skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]

    # Expand per-layer (width, count) pairs into explicit per-block channel lists.
    direct_channels = [[[c] * n for (c, n) in zip(ci, li)]
                       for (ci, li) in zip(direct_channels_per_layers, direct_layers)]
    skip_channels = [[[c] * n for (c, n) in zip(ci, li)]
                     for (ci, li) in zip(skip_channels_per_layers, skip_layers)]

    init_block_channels = 64

    net = FishNet(
        direct_channels=direct_channels,
        skip_channels=skip_channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def fishnet99(**kwargs):
    """
    FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_fishnet(model_name="fishnet99", blocks=99, **kwargs)
def fishnet150(**kwargs):
    """
    FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_fishnet(model_name="fishnet150", blocks=150, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test both FishNet variants: parameter count, forward and backward."""
    import torch

    pretrained = False

    expected_weight_counts = {
        fishnet99: 16628904,
        fishnet150: 24959400,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 19,302
| 30.033762
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/hrnet.py
|
"""
HRNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
"""
__all__ = ['hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32', 'hrnetv2_w40',
'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64']
import os
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, Identity
from .resnet import ResUnit
class UpSamplingBlock(nn.Module):
    """
    HRNet specific upsampling block: 1x1 projection then nearest upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor):
        super(UpSamplingBlock, self).__init__()
        # No activation: the fuse layer sums branches before the shared ReLU.
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=1,
            activation=None)
        self.upsample = nn.Upsample(
            scale_factor=scale_factor,
            mode="nearest")

    def forward(self, x):
        return self.upsample(self.conv(x))
class HRBlock(nn.Module):
    """
    HRNet block: parallel per-resolution residual branches followed by an
    all-to-all cross-resolution fusion.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblock.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_branches,
                 num_subblocks):
        super(HRBlock, self).__init__()
        # NOTE: the list object is stored (not copied) and its entries are updated
        # in place below, so the caller (HRStage) observes the post-block widths.
        self.in_channels_list = in_channels_list
        self.num_branches = num_branches

        self.branches = nn.Sequential()
        for i in range(num_branches):
            layers = nn.Sequential()
            in_channels_i = self.in_channels_list[i]
            out_channels_i = out_channels_list[i]
            for j in range(num_subblocks[i]):
                layers.add_module("unit{}".format(j + 1), ResUnit(
                    in_channels=in_channels_i,
                    out_channels=out_channels_i,
                    stride=1,
                    bottleneck=False))
                in_channels_i = out_channels_i
            # Record this branch's output width for the fuse layers below.
            self.in_channels_list[i] = out_channels_i
            self.branches.add_module("branch{}".format(i + 1), layers)

        if num_branches > 1:
            # fuse_layers[i][j] maps branch j's output to branch i's resolution:
            # j > i upsamples, j == i is identity, j < i downsamples via strided 3x3s.
            self.fuse_layers = nn.Sequential()
            for i in range(num_branches):
                fuse_layer = nn.Sequential()
                for j in range(num_branches):
                    if j > i:
                        fuse_layer.add_module("block{}".format(j + 1), UpSamplingBlock(
                            in_channels=in_channels_list[j],
                            out_channels=in_channels_list[i],
                            scale_factor=2 ** (j - i)))
                    elif j == i:
                        fuse_layer.add_module("block{}".format(j + 1), Identity())
                    else:
                        conv3x3_seq = nn.Sequential()
                        for k in range(i - j):
                            if k == i - j - 1:
                                # Last downsampling step changes the width and has
                                # no activation (the sum below is ReLU'd once).
                                conv3x3_seq.add_module("subblock{}".format(k + 1), conv3x3_block(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[i],
                                    stride=2,
                                    activation=None))
                            else:
                                conv3x3_seq.add_module("subblock{}".format(k + 1), conv3x3_block(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[j],
                                    stride=2))
                        fuse_layer.add_module("block{}".format(j + 1), conv3x3_seq)
                self.fuse_layers.add_module("layer{}".format(i + 1), fuse_layer)
            self.activ = nn.ReLU(True)

    def forward(self, x):
        # NOTE: `x` is a list of per-branch tensors and is mutated in place.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        if self.num_branches == 1:
            return x

        # Fuse: each output i is the ReLU of the sum of all branches mapped
        # to resolution i.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.activ(y))
        return x_fuse
class HRStage(nn.Module):
    """
    HRNet stage block: a transition that adjusts/extends the branch set,
    followed by a sequence of HRBlocks.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of output channels from the previous layer.
    out_channels_list : list of int
        Number of output channels in the current layer.
    num_modules : int
        Number of modules.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblocks.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_modules,
                 num_subblocks):
        super(HRStage, self).__init__()
        self.branches = num_branches
        self.in_channels_list = out_channels_list
        in_branches = len(in_channels_list)
        out_branches = len(out_channels_list)

        # Transition: reuse/adapt existing branches, create new (lower-resolution)
        # ones from the last input branch via strided 3x3 chains.
        self.transition = nn.Sequential()
        for i in range(out_branches):
            if i < in_branches:
                if out_channels_list[i] != in_channels_list[i]:
                    self.transition.add_module("block{}".format(i + 1), conv3x3_block(
                        in_channels=in_channels_list[i],
                        out_channels=out_channels_list[i],
                        stride=1))
                else:
                    self.transition.add_module("block{}".format(i + 1), Identity())
            else:
                conv3x3_seq = nn.Sequential()
                for j in range(i + 1 - in_branches):
                    in_channels_i = in_channels_list[-1]
                    # Only the final strided conv switches to the target width.
                    out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                    conv3x3_seq.add_module("subblock{}".format(j + 1), conv3x3_block(
                        in_channels=in_channels_i,
                        out_channels=out_channels_i,
                        stride=2))
                self.transition.add_module("block{}".format(i + 1), conv3x3_seq)

        self.layers = nn.Sequential()
        for i in range(num_modules):
            self.layers.add_module("block{}".format(i + 1), HRBlock(
                in_channels_list=self.in_channels_list,
                out_channels_list=out_channels_list,
                num_branches=num_branches,
                num_subblocks=num_subblocks))
            # HRBlock exposes (and mutates) its channel list; track the latest.
            self.in_channels_list = self.layers[-1].in_channels_list

    def forward(self, x):
        # `x` may be a single tensor (first stage) or a list of branch tensors.
        x_list = []
        for j in range(self.branches):
            if not isinstance(self.transition[j], Identity):
                # New branches are derived from the last (lowest-resolution) input.
                x_list.append(self.transition[j](x[-1] if type(x) is list else x))
            else:
                x_list_j = x[j] if type(x) is list else x
                x_list.append(x_list_j)
        y_list = self.layers(x_list)
        return y_list
class HRInitBlock(nn.Module):
    """
    HRNet specific initial block: two strided 3x3 convs (4x downsampling)
    followed by bottleneck residual units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    num_subblocks : int
        Number of subblocks.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 num_subblocks):
        super(HRInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=2)

        self.subblocks = nn.Sequential()
        channels = mid_channels
        for idx in range(1, num_subblocks + 1):
            self.subblocks.add_module("block{}".format(idx), ResUnit(
                in_channels=channels,
                out_channels=out_channels,
                stride=1,
                bottleneck=True))
            channels = out_channels

    def forward(self, x):
        return self.subblocks(self.conv2(self.conv1(x)))
class HRFinalBlock(nn.Module):
    """
    HRNet specific final block: widen each branch with a bottleneck unit,
    then cascade branches top-down with strided convs into one head.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels per stage.
    out_channels_list : list of int
        Number of output channels per stage.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list):
        super(HRFinalBlock, self).__init__()
        self.inc_blocks = nn.Sequential()
        for idx, channels in enumerate(in_channels_list, start=1):
            self.inc_blocks.add_module("block{}".format(idx), ResUnit(
                in_channels=channels,
                out_channels=out_channels_list[idx - 1],
                stride=1,
                bottleneck=True))

        self.down_blocks = nn.Sequential()
        for idx in range(1, len(in_channels_list)):
            self.down_blocks.add_module("block{}".format(idx), conv3x3_block(
                in_channels=out_channels_list[idx - 1],
                out_channels=out_channels_list[idx],
                stride=2,
                bias=True))

        self.final_layer = conv1x1_block(
            in_channels=1024,
            out_channels=2048,
            stride=1,
            bias=True)

    def forward(self, x):
        # Accumulate top-down: downsample the running sum, add the next branch.
        y = self.inc_blocks[0](x[0])
        for i, down in enumerate(self.down_blocks):
            y = self.inc_blocks[i + 1](x[i + 1]) + down(y)
        return self.final_layer(y)
class HRNet(nn.Module):
    """
    HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    init_num_subblocks : int
        Number of subblocks in the initial unit.
    num_modules : int
        Number of modules per stage.
    num_subblocks : list of int
        Number of subblocks per stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 init_num_subblocks,
                 num_modules,
                 num_subblocks,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(HRNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Number of parallel branches in each of the three multi-resolution stages.
        self.branches = [2, 3, 4]

        self.features = nn.Sequential()
        self.features.add_module("init_block", HRInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            mid_channels=64,
            num_subblocks=init_num_subblocks))
        in_channels_list = [init_block_channels]
        for i in range(len(self.branches)):
            self.features.add_module("stage{}".format(i + 1), HRStage(
                in_channels_list=in_channels_list,
                out_channels_list=channels[i],
                num_modules=num_modules[i],
                num_branches=self.branches[i],
                num_subblocks=num_subblocks[i]))
            # Each stage exposes its final per-branch widths for the next stage.
            in_channels_list = self.features[-1].in_channels_list
        self.features.add_module("final_block", HRFinalBlock(
            in_channels_list=in_channels_list,
            out_channels_list=[128, 256, 512, 1024]))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=2048,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Bug fix: `named_modules()` yields (name, module) tuples, so the previous
        # `for module in self.named_modules()` made both isinstance checks always
        # False and silently skipped all weight initialization. Unpack the pair
        # (matching the FishNet implementation in this package).
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled features before the linear classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_hrnet(version,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create HRNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of HRNet ('w18s1', 'w18s2', 'w18', 'w30', 'w32', 'w40', 'w44',
        'w48' or 'w64').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HRNet
        The constructed network.

    Raises:
    ------
    ValueError
        If `version` is unsupported, or `pretrained` is set without `model_name`.
    """
    # version -> (init_block_channels, init_num_subblocks, channels, num_modules)
    configs = {
        "w18s1": (128, 1, [[16, 32], [16, 32, 64], [16, 32, 64, 128]], [1, 1, 1]),
        "w18s2": (256, 2, [[18, 36], [18, 36, 72], [18, 36, 72, 144]], [1, 3, 2]),
        "w18": (256, 4, [[18, 36], [18, 36, 72], [18, 36, 72, 144]], [1, 4, 3]),
        "w30": (256, 4, [[30, 60], [30, 60, 120], [30, 60, 120, 240]], [1, 4, 3]),
        "w32": (256, 4, [[32, 64], [32, 64, 128], [32, 64, 128, 256]], [1, 4, 3]),
        "w40": (256, 4, [[40, 80], [40, 80, 160], [40, 80, 160, 320]], [1, 4, 3]),
        "w44": (256, 4, [[44, 88], [44, 88, 176], [44, 88, 176, 352]], [1, 4, 3]),
        "w48": (256, 4, [[48, 96], [48, 96, 192], [48, 96, 192, 384]], [1, 4, 3]),
        "w64": (256, 4, [[64, 128], [64, 128, 256], [64, 128, 256, 512]], [1, 4, 3]),
    }
    if version not in configs:
        raise ValueError("Unsupported HRNet version {}".format(version))
    init_block_channels, init_num_subblocks, channels, num_modules = configs[version]

    num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels]

    net = HRNet(
        channels=channels,
        init_block_channels=init_block_channels,
        init_num_subblocks=init_num_subblocks,
        num_modules=num_modules,
        num_subblocks=num_subblocks,
        **kwargs)

    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def hrnet_w18_small_v1(**kwargs):
    """
    HRNet-W18 Small V1 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnet_w18_small_v1", version="w18s1", **kwargs)
def hrnet_w18_small_v2(**kwargs):
    """
    HRNet-W18 Small V2 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnet_w18_small_v2", version="w18s2", **kwargs)
def hrnetv2_w18(**kwargs):
    """
    HRNetV2-W18 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w18", version="w18", **kwargs)
def hrnetv2_w30(**kwargs):
    """
    HRNetV2-W30 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w30", version="w30", **kwargs)
def hrnetv2_w32(**kwargs):
    """
    HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w32", version="w32", **kwargs)
def hrnetv2_w40(**kwargs):
    """
    HRNetV2-W40 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w40", version="w40", **kwargs)
def hrnetv2_w44(**kwargs):
    """
    HRNetV2-W44 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w44", version="w44", **kwargs)
def hrnetv2_w48(**kwargs):
    """
    HRNetV2-W48 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w48", version="w48", **kwargs)
def hrnetv2_w64(**kwargs):
    """
    HRNetV2-W64 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w64", version="w64", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all HRNet variants: parameter count, forward and backward."""
    import torch

    pretrained = False

    expected_weight_counts = {
        hrnet_w18_small_v1: 13187464,
        hrnet_w18_small_v2: 15597464,
        hrnetv2_w18: 21299004,
        hrnetv2_w30: 37712220,
        hrnetv2_w32: 41232680,
        hrnetv2_w40: 57557160,
        hrnetv2_w44: 67064984,
        hrnetv2_w48: 77469864,
        hrnetv2_w64: 128059944,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 22,226
| 32.83105
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/fcn8sd.py
|
"""
FCN-8s(d) for image segmentation, implemented in PyTorch.
Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
"""
__all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco',
'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k',
'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes']
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class FCNFinalBlock(nn.Module):
    """
    FCN-8s(d) classification head: a 3x3 bottleneck convolution, dropout, a
    1x1 scoring convolution, and bilinear upsampling to the requested size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (segmentation classes).
    bottleneck_factor : int, default 4
        Channel reduction factor for the bottleneck convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4):
        super(FCNFinalBlock, self).__init__()
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.dropout = nn.Dropout(p=0.1, inplace=False)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x, out_size):
        y = self.conv1(x)
        y = self.dropout(y)
        y = self.conv2(y)
        # Upsample class scores back to the requested spatial resolution.
        return F.interpolate(y, size=out_size, mode="bilinear", align_corners=True)
class FCN8sd(nn.Module):
    """
    FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
    It is an experimental model mixed FCN-8s and PSPNet.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    num_classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 num_classes=21):
        super(FCN8sd, self).__init__()
        assert (in_channels > 0)
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.backbone = backbone
        pool_out_channels = backbone_out_channels
        self.final_block = FCNFinalBlock(
            in_channels=pool_out_channels,
            out_channels=num_classes)
        if self.aux:
            # Auxiliary head attaches to an intermediate backbone feature map
            # that is assumed to carry half the backbone's output channels.
            aux_out_channels = backbone_out_channels // 2
            self.aux_block = FCNFinalBlock(
                in_channels=aux_out_channels,
                out_channels=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for every convolution weight.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone is expected to return (main features, auxiliary features).
        x, y = self.backbone(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_fcn8sd(backbone,
               num_classes,
               aux=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Construct an FCN-8s(d) network and optionally load pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    num_classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = FCN8sd(backbone=backbone, num_classes=num_classes, aux=aux, **kwargs)
    if pretrained:
        # A valid model name is required to locate the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def fcn8sd_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b backbone for Pascal VOC, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_voc", **kwargs)
def fcn8sd_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b backbone for Pascal VOC, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_voc", **kwargs)
def fcn8sd_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b backbone for COCO, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_coco", **kwargs)
def fcn8sd_resnetd101b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b backbone for COCO, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_coco", **kwargs)
def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b backbone for ADE20K, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_ade20k", **kwargs)
def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b backbone for ADE20K, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_ade20k", **kwargs)
def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b backbone for Cityscapes, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_cityscapes", **kwargs)
def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b backbone for Cityscapes, from 'Fully Convolutional Networks for Semantic
    Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features[-1]  # drop the backbone's final pooling stage
    return get_fcn8sd(backbone=features, num_classes=num_classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build every FCN-8s(d) variant, check its parameter count
    (with and without the auxiliary head) and the output segmentation shape."""
    import torch
    in_size = (480, 480)
    aux = True
    pretrained = False
    models = [
        (fcn8sd_resnetd50b_voc, 21),
        (fcn8sd_resnetd101b_voc, 21),
        (fcn8sd_resnetd50b_coco, 21),
        (fcn8sd_resnetd101b_coco, 21),
        (fcn8sd_resnetd50b_ade20k, 150),
        (fcn8sd_resnetd101b_ade20k, 150),
        (fcn8sd_resnetd50b_cityscapes, 19),
        (fcn8sd_resnetd101b_cityscapes, 19),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts; the aux head adds extra parameters.
        if aux:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 35545324)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 54537452)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 35444454)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 54436582)
        else:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 33146966)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 52139094)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 33079763)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 52071891)
        x = torch.randn(1, 3, in_size[0], in_size[1])
        # With aux enabled the network returns (main, aux) predictions.
        ys = net(x)
        y = ys[0] if aux else ys
        y.sum().backward()
        assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and
                (y.size(3) == x.size(3)))
| 16,126
| 37.125296
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/selecsls.py
|
"""
SelecSLS for ImageNet-1K, implemented in PyTorch.
Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
"""
__all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, DualPathSequential
class SelecSLSBlock(nn.Module):
    """
    SelecSLS block: a 1x1 channel-expanding convolution followed by a 3x3
    convolution back down to the target width.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(SelecSLSBlock, self).__init__()
        # The 1x1 conv doubles the output width before the 3x3 conv.
        mid_channels = 2 * out_channels
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class SelecSLSUnit(nn.Module):
    """
    SelecSLS unit with selective long/short-range skip connections.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    skip_channels : int
        Number of skipped channels.
    mid_channels : int
        Number of middle channels.
    stride : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 skip_channels,
                 mid_channels,
                 stride):
        super(SelecSLSUnit, self).__init__()
        self.resize = (stride == 2)
        mid2_channels = mid_channels // 2
        # Fused width: branch1 + branch2 + branch3 (+ skip when stride == 1).
        last_channels = 2 * mid_channels + (skip_channels if stride == 1 else 0)
        self.branch1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride)
        self.branch2 = SelecSLSBlock(
            in_channels=mid_channels,
            out_channels=mid2_channels)
        self.branch3 = SelecSLSBlock(
            in_channels=mid2_channels,
            out_channels=mid2_channels)
        self.last_conv = conv1x1_block(
            in_channels=last_channels,
            out_channels=out_channels)

    def forward(self, x, x0):
        b1 = self.branch1(x)
        b2 = self.branch2(b1)
        b3 = self.branch3(b2)
        if self.resize:
            # Downsampling unit: the skip input restarts from this output.
            out = self.last_conv(torch.cat((b1, b2, b3), dim=1))
            return out, out
        # Same-resolution unit: fuse in the carried skip features and pass
        # the skip tensor through unchanged.
        out = self.last_conv(torch.cat((b1, b2, b3, x0), dim=1))
        return out, x0
class SelecSLS(nn.Module):
    """
    SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    skip_channels : list of list of int
        Number of skipped channels for each unit.
    mid_channels : list of list of int
        Number of middle channels for each unit.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 1x1) kernel for each head unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 skip_channels,
                 mid_channels,
                 kernels3,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SelecSLS, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        init_block_channels = 32
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=(1 + len(kernels3)))
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # Stages with k < 0 are the dual-path SelecSLS backbone; the
            # trailing head stages (k >= 0) are plain convolution stacks.
            k = i - len(skip_channels)
            stage = DualPathSequential() if k < 0 else nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if j == 0 else 1
                if k < 0:
                    unit = SelecSLSUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        skip_channels=skip_channels[i][j],
                        mid_channels=mid_channels[i][j],
                        stride=stride)
                else:
                    conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block
                    unit = conv_block_class(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride)
                stage.add_module("unit{}".format(j + 1), unit)
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=4,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Bug fix: `named_modules()` yields (name, module) pairs, so the
        # previous `for module in self.named_modules()` iterated over tuples
        # and the isinstance checks never matched — no layer was initialized.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_selecsls(version,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create SelecSLS model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SelecSLS ('42', '42b', '60', '60b' or '84').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-version configuration: backbone stage widths, skip/middle channel
    # widths of the dual-path units, head kernel choices, and head widths.
    if version in ("42", "42b"):
        channels = [[64, 128], [144, 288], [304, 480]]
        skip_channels = [[0, 64], [0, 144], [0, 304]]
        mid_channels = [[64, 64], [144, 144], [304, 304]]
        kernels3 = [[1, 1], [1, 0]]
        # The 'b' variant swaps the widths of the last two head convolutions.
        if version == "42":
            head_channels = [[960, 1024], [1024, 1280]]
        else:
            head_channels = [[960, 1024], [1280, 1024]]
    elif version in ("60", "60b"):
        channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]]
        skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]]
        mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]]
        kernels3 = [[1, 1], [1, 0]]
        if version == "60":
            head_channels = [[756, 1024], [1024, 1280]]
        else:
            head_channels = [[756, 1024], [1280, 1024]]
    elif version == "84":
        channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]]
        skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]]
        mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]]
        kernels3 = [[1, 1], [1, 1]]
        head_channels = [[960, 1024], [1024, 1280]]
    else:
        raise ValueError("Unsupported SelecSLS version {}".format(version))
    # Head stages are appended after the dual-path backbone stages.
    channels += head_channels
    net = SelecSLS(
        channels=channels,
        skip_channels=skip_channels,
        mid_channels=mid_channels,
        kernels3=kernels3,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def selecsls42(**kwargs):
    """
    SelecSLS-42 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic builder.
    return get_selecsls(version="42", model_name="selecsls42", **kwargs)
def selecsls42b(**kwargs):
    """
    SelecSLS-42b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic builder.
    return get_selecsls(version="42b", model_name="selecsls42b", **kwargs)
def selecsls60(**kwargs):
    """
    SelecSLS-60 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic builder.
    return get_selecsls(version="60", model_name="selecsls60", **kwargs)
def selecsls60b(**kwargs):
    """
    SelecSLS-60b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic builder.
    return get_selecsls(version="60b", model_name="selecsls60b", **kwargs)
def selecsls84(**kwargs):
    """
    SelecSLS-84 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic builder.
    return get_selecsls(version="84", model_name="selecsls84", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build every SelecSLS variant, check its trainable-parameter
    count against the reference value, and verify the classifier output shape."""
    import torch
    pretrained = False
    models = [
        selecsls42,
        selecsls42b,
        selecsls60,
        selecsls60b,
        selecsls84,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each variant.
        assert (model != selecsls42 or weight_count == 30354952)
        assert (model != selecsls42b or weight_count == 32458248)
        assert (model != selecsls60 or weight_count == 30670768)
        assert (model != selecsls60b or weight_count == 32774064)
        assert (model != selecsls84 or weight_count == 50954600)
        # Forward/backward pass on a single ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 12,347
| 31.580475
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/inceptionv4.py
|
"""
InceptionV4 for ImageNet-1K, implemented in PyTorch.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
import torch
import torch.nn as nn
from .common import ConvBlock, conv3x3_block, Concurrent
from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
class Conv3x3Branch(nn.Module):
    """
    InceptionV4 specific branch: a single stride-2, unpadded 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(Conv3x3Branch, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            padding=0,
            bn_eps=bn_eps)

    def forward(self, x):
        return self.conv(x)
class ConvSeq3x3Branch(nn.Module):
    """
    InceptionV4 specific convolutional sequence branch block with splitting by 3x3.
    The trunk is a configurable sequence of conv blocks; its output is then
    split into parallel 1x3 and 3x1 convolutions whose results are
    concatenated, so the branch emits 2 * `out_channels` channels.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (per split convolution).
    mid_channels_list : list of tuple of int
        List of numbers of output channels for middle layers.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps):
        super(ConvSeq3x3Branch, self).__init__()
        # Trunk: one ConvBlock per entry of the parallel config lists.
        self.conv_list = nn.Sequential()
        for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
                mid_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.add_module("conv{}".format(i + 1), ConvBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                stride=strides,
                padding=padding,
                bn_eps=bn_eps))
            in_channels = mid_channels
        # Parallel asymmetric convolutions applied to the trunk output.
        self.conv1x3 = ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(1, 3),
            stride=1,
            padding=(0, 1),
            bn_eps=bn_eps)
        self.conv3x1 = ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(3, 1),
            stride=1,
            padding=(1, 0),
            bn_eps=bn_eps)

    def forward(self, x):
        x = self.conv_list(x)
        y1 = self.conv1x3(x)
        y2 = self.conv3x1(x)
        # Channel-wise concatenation of the two split convolutions.
        x = torch.cat((y1, y2), dim=1)
        return x
class InceptionAUnit(nn.Module):
    """
    InceptionV4 type Inception-A unit. Four parallel branches (each producing
    96 channels) are combined by `Concurrent` — presumably channel-wise
    concatenation, yielding 384 channels; verify against `Concurrent`.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionAUnit, self).__init__()
        in_channels = 384
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96, 96),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps))
        self.branches.add_module("branch4", AvgPoolBranch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps,
            count_include_pad=False))

    def forward(self, x):
        x = self.branches(x)
        return x
class ReductionAUnit(nn.Module):
    """
    InceptionV4 type Reduction-A unit. Three parallel branches, two of which
    use stride-2 convolutions and one max pooling, so the spatial resolution
    is halved.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(ReductionAUnit, self).__init__()
        in_channels = 384
        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", MaxPoolBranch())

    def forward(self, x):
        x = self.branches(x)
        return x
class InceptionBUnit(nn.Module):
    """
    InceptionV4 type Inception-B unit. Four parallel branches using
    asymmetric 1x7/7x1 convolutions, combined by `Concurrent`.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionBUnit, self).__init__()
        in_channels = 1024
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=384,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192, 224, 224, 256),
            kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
            strides_list=(1, 1, 1, 1, 1),
            padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
            bn_eps=bn_eps))
        self.branches.add_module("branch4", AvgPoolBranch(
            in_channels=in_channels,
            out_channels=128,
            bn_eps=bn_eps,
            count_include_pad=False))

    def forward(self, x):
        x = self.branches(x)
        return x
class ReductionBUnit(nn.Module):
    """
    InceptionV4 type Reduction-B unit. Three parallel branches; the two conv
    branches end with stride-2 3x3 convolutions and the third is max pooling,
    halving the spatial resolution.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(ReductionBUnit, self).__init__()
        in_channels = 1024
        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256, 320, 320),
            kernel_size_list=(1, (1, 7), (7, 1), 3),
            strides_list=(1, 1, 1, 2),
            padding_list=(0, (0, 3), (3, 0), 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", MaxPoolBranch())

    def forward(self, x):
        x = self.branches(x)
        return x
class InceptionCUnit(nn.Module):
    """
    InceptionV4 type Inception-C unit. Branches 2 and 3 are split 1x3/3x1
    branches and therefore each contribute 2 * 256 channels.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionCUnit, self).__init__()
        in_channels = 1536
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=256,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels=256,
            mid_channels_list=(384,),
            kernel_size_list=(1,),
            strides_list=(1,),
            padding_list=(0,),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels=256,
            mid_channels_list=(384, 448, 512),
            kernel_size_list=(1, (3, 1), (1, 3)),
            strides_list=(1, 1, 1),
            padding_list=(0, (1, 0), (0, 1)),
            bn_eps=bn_eps))
        self.branches.add_module("branch4", AvgPoolBranch(
            in_channels=in_channels,
            out_channels=256,
            bn_eps=bn_eps,
            count_include_pad=False))

    def forward(self, x):
        x = self.branches(x)
        return x
class InceptBlock3a(nn.Module):
    """
    InceptionV4 type Mixed-3a block: a max-pool branch in parallel with a
    stride-2 3x3 convolution branch.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptBlock3a, self).__init__()
        self.branches = Concurrent()
        self.branches.add_module("branch1", MaxPoolBranch())
        self.branches.add_module("branch2", Conv3x3Branch(
            in_channels=64,
            out_channels=96,
            bn_eps=bn_eps))

    def forward(self, x):
        return self.branches(x)
class InceptBlock4a(nn.Module):
    """
    InceptionV4 type Mixed-4a block: two parallel convolution sequences, one
    plain and one using asymmetric 1x7/7x1 convolutions.
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptBlock4a, self).__init__()
        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 96),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 64, 64, 96),
            kernel_size_list=(1, (1, 7), (7, 1), 3),
            strides_list=(1, 1, 1, 1),
            padding_list=(0, (0, 3), (3, 0), 0),
            bn_eps=bn_eps))

    def forward(self, x):
        x = self.branches(x)
        return x
class InceptBlock5a(nn.Module):
    """
    InceptionV4 type Mixed-5a block: a stride-2 3x3 convolution branch in
    parallel with a max-pool branch.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptBlock5a, self).__init__()
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv3x3Branch(
            in_channels=192,
            out_channels=192,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", MaxPoolBranch())

    def forward(self, x):
        return self.branches(x)
class InceptInitBlock(nn.Module):
    """
    InceptionV4 specific initial (stem) block: three 3x3 convolutions followed
    by the Mixed-3a/4a/5a blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(InceptInitBlock, self).__init__()
        # Attribute names are kept as-is so pretrained state dicts still load.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            stride=2,
            padding=0,
            bn_eps=bn_eps)
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            stride=1,
            padding=1,
            bn_eps=bn_eps)
        self.block1 = InceptBlock3a(bn_eps=bn_eps)
        self.block2 = InceptBlock4a(bn_eps=bn_eps)
        self.block3 = InceptBlock5a(bn_eps=bn_eps)

    def forward(self, x):
        # Apply the stem operations strictly in order.
        for stem_op in (self.conv1, self.conv2, self.conv3,
                        self.block1, self.block2, self.block3):
            x = stem_op(x)
        return x
class InceptionV4(nn.Module):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 num_classes=1000):
        super(InceptionV4, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        layers = [4, 8, 4]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = nn.Sequential()
        self.features.add_module("init_block", InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps))
        for i, layers_per_stage in enumerate(layers):
            stage = nn.Sequential()
            for j in range(layers_per_stage):
                # The first unit of every stage after the first is a
                # reduction (downsampling) unit.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                else:
                    unit = normal_units[i]
                stage.add_module("unit{}".format(j + 1), unit(bn_eps=bn_eps))
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))

        self.output = nn.Sequential()
        if dropout_rate > 0.0:
            self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=1536,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        # Bug fix: `named_modules()` yields (name, module) pairs. The previous
        # code iterated over the pairs themselves, so `isinstance(..., nn.Conv2d)`
        # was never True and this custom initialization was silently skipped.
        # The sibling models in this package (e.g. RegNet, ICNet) unpack the
        # pair; do the same here.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_inceptionv4(model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create InceptionV4 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = InceptionV4(**kwargs)
    if pretrained:
        # A falsy model name (None or "") cannot identify a checkpoint.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def inceptionv4(**kwargs):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # The reference configuration uses a larger batch-norm epsilon (1e-3).
    net = get_inceptionv4(model_name="inceptionv4", bn_eps=1e-3, **kwargs)
    return net
def _calc_width(net):
    """Return the total number of trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(param.size()) for param in net.parameters()
               if param.requires_grad)
def _test():
    """Smoke-test the InceptionV4 factory: parameter count and output shape."""
    import torch

    pretrained = False

    models = [
        inceptionv4,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # net.train()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Bug fix: the guard previously compared `model` against the class
        # `InceptionV4`, which is never equal to the factory function, so the
        # weight-count assertion was vacuously true. Compare against the
        # factory function, as the other model files in this package do.
        assert (model != inceptionv4 or weight_count == 42679816)

        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 17,876
| 28.944724
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/regnet.py
|
"""
RegNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040',
'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004',
'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080',
'regnety120', 'regnety160', 'regnety320']
import os
import numpy as np
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, SEBlock
class RegNetBottleneck(nn.Module):
    """
    RegNet bottleneck block for residual path in RegNet unit:
    1x1 conv -> grouped 3x3 conv (optionally SE) -> 1x1 conv (no activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    bottleneck_factor : int, default 1
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 groups,
                 use_se,
                 bottleneck_factor=1):
        super(RegNetBottleneck, self).__init__()
        self.use_se = use_se
        mid_channels = out_channels // bottleneck_factor
        # `groups` here is the RegNet group *width*; derive the conv group count.
        mid_groups = mid_channels // groups

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            groups=mid_groups)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                mid_channels=(in_channels // 4))
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        y = self.conv1(x)
        y = self.conv2(y)
        if self.use_se:
            y = self.se(y)
        return self.conv3(y)
class RegNetUnit(nn.Module):
    """
    RegNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 groups,
                 use_se):
        super(RegNetUnit, self).__init__()
        # A projection shortcut is needed when either the channel count or
        # the spatial resolution changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.body = RegNetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            groups=groups,
            use_se=use_se)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x) + shortcut
        return self.activ(out)
class RegNet(nn.Module):
    """
    RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : list of int
        Number of groups for each stage.
    use_se : bool
        Whether to use SE-module.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(RegNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            padding=1))
        prev_channels = init_block_channels
        for stage_idx, (stage_channels, stage_groups) in enumerate(zip(channels, groups)):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(unit_idx + 1), RegNetUnit(
                    in_channels=prev_channels,
                    out_channels=unit_channels,
                    # Each stage downsamples exactly once, at its first unit.
                    stride=(2 if unit_idx == 0 else 1),
                    groups=stage_groups,
                    use_se=use_se))
                prev_channels = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Linear(
            in_features=prev_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every convolution; zero its bias if present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_regnet(channels_init,
               channels_slope,
               channels_mult,
               depth,
               groups,
               use_se=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create RegNet model with specific parameters.

    Parameters:
    ----------
    channels_init : float
        Initial value for channels/widths.
    channels_slope : float
        Slope value for channels/widths.
    channels_mult : float
        Width multiplier value.
    depth : int
        Depth value.
    groups : int
        Number of groups.
    use_se : bool, default False
        Whether to use SE-module.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    divisor = 8
    assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0)

    # Generate continuous per-block channels/widths: w_j = w_0 + w_a * j.
    channels_cont = np.arange(depth) * channels_slope + channels_init
    # Quantize widths to a geometric progression with ratio `channels_mult`.
    channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
    channels = channels_init * np.power(channels_mult, channels_exps)
    # Round widths to multiples of `divisor`.
    # Bug fix: `np.int` was removed in NumPy 1.24; use the builtin `int`,
    # which NumPy accepts as a dtype specifier with identical semantics.
    channels = (np.round(channels / divisor) * divisor).astype(int)
    # Collapse equal consecutive widths into stages: per-stage widths and depths.
    channels_per_stage, layers = np.unique(channels, return_counts=True)
    # Adjust widths so each is divisible by its (capped) group width.
    groups_per_stage = [min(groups, c) for c in channels_per_stage]
    channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
    channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]

    init_block_channels = 32

    net = RegNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups_per_stage,
        use_se=use_se,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def regnetx002(**kwargs):
    """
    RegNetX-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8)
    return get_regnet(model_name="regnetx002", **cfg, **kwargs)


def regnetx004(**kwargs):
    """
    RegNetX-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=24.48, channels_mult=2.54, depth=22, groups=16)
    return get_regnet(model_name="regnetx004", **cfg, **kwargs)


def regnetx006(**kwargs):
    """
    RegNetX-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=36.97, channels_mult=2.24, depth=16, groups=24)
    return get_regnet(model_name="regnetx006", **cfg, **kwargs)


def regnetx008(**kwargs):
    """
    RegNetX-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=56, channels_slope=35.73, channels_mult=2.28, depth=16, groups=16)
    return get_regnet(model_name="regnetx008", **cfg, **kwargs)


def regnetx016(**kwargs):
    """
    RegNetX-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=34.01, channels_mult=2.25, depth=18, groups=24)
    return get_regnet(model_name="regnetx016", **cfg, **kwargs)


def regnetx032(**kwargs):
    """
    RegNetX-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=88, channels_slope=26.31, channels_mult=2.25, depth=25, groups=48)
    return get_regnet(model_name="regnetx032", **cfg, **kwargs)


def regnetx040(**kwargs):
    """
    RegNetX-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=96, channels_slope=38.65, channels_mult=2.43, depth=23, groups=40)
    return get_regnet(model_name="regnetx040", **cfg, **kwargs)


def regnetx064(**kwargs):
    """
    RegNetX-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=184, channels_slope=60.83, channels_mult=2.07, depth=17, groups=56)
    return get_regnet(model_name="regnetx064", **cfg, **kwargs)


def regnetx080(**kwargs):
    """
    RegNetX-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=49.56, channels_mult=2.88, depth=23, groups=120)
    return get_regnet(model_name="regnetx080", **cfg, **kwargs)


def regnetx120(**kwargs):
    """
    RegNetX-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112)
    return get_regnet(model_name="regnetx120", **cfg, **kwargs)


def regnetx160(**kwargs):
    """
    RegNetX-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=216, channels_slope=55.59, channels_mult=2.1, depth=22, groups=128)
    return get_regnet(model_name="regnetx160", **cfg, **kwargs)


def regnetx320(**kwargs):
    """
    RegNetX-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=320, channels_slope=69.86, channels_mult=2.0, depth=23, groups=168)
    return get_regnet(model_name="regnetx320", **cfg, **kwargs)
def regnety002(**kwargs):
    """
    RegNetY-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8, use_se=True)
    return get_regnet(model_name="regnety002", **cfg, **kwargs)


def regnety004(**kwargs):
    """
    RegNetY-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=27.89, channels_mult=2.09, depth=16, groups=8, use_se=True)
    return get_regnet(model_name="regnety004", **cfg, **kwargs)


def regnety006(**kwargs):
    """
    RegNetY-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=32.54, channels_mult=2.32, depth=15, groups=16, use_se=True)
    return get_regnet(model_name="regnety006", **cfg, **kwargs)


def regnety008(**kwargs):
    """
    RegNetY-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=56, channels_slope=38.84, channels_mult=2.4, depth=14, groups=16, use_se=True)
    return get_regnet(model_name="regnety008", **cfg, **kwargs)


def regnety016(**kwargs):
    """
    RegNetY-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=20.71, channels_mult=2.65, depth=27, groups=24, use_se=True)
    return get_regnet(model_name="regnety016", **cfg, **kwargs)


def regnety032(**kwargs):
    """
    RegNetY-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=42.63, channels_mult=2.66, depth=21, groups=24, use_se=True)
    return get_regnet(model_name="regnety032", **cfg, **kwargs)


def regnety040(**kwargs):
    """
    RegNetY-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=96, channels_slope=31.41, channels_mult=2.24, depth=22, groups=64, use_se=True)
    return get_regnet(model_name="regnety040", **cfg, **kwargs)


def regnety064(**kwargs):
    """
    RegNetY-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=112, channels_slope=33.22, channels_mult=2.27, depth=25, groups=72, use_se=True)
    return get_regnet(model_name="regnety064", **cfg, **kwargs)


def regnety080(**kwargs):
    """
    RegNetY-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=192, channels_slope=76.82, channels_mult=2.19, depth=17, groups=56, use_se=True)
    return get_regnet(model_name="regnety080", **cfg, **kwargs)


def regnety120(**kwargs):
    """
    RegNetY-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112, use_se=True)
    return get_regnet(model_name="regnety120", **cfg, **kwargs)


def regnety160(**kwargs):
    """
    RegNetY-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=200, channels_slope=106.23, channels_mult=2.48, depth=18, groups=112, use_se=True)
    return get_regnet(model_name="regnety160", **cfg, **kwargs)


def regnety320(**kwargs):
    """
    RegNetY-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=232, channels_slope=115.89, channels_mult=2.53, depth=20, groups=232, use_se=True)
    return get_regnet(model_name="regnety320", **cfg, **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(param.size()) for param in net.parameters()
               if param.requires_grad)
def _test():
    """Smoke-test every RegNet factory: parameter counts and output shapes."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts, keyed by factory function.
    # Dict insertion order preserves the original iteration order.
    expected_counts = {
        regnetx002: 2684792,
        regnetx004: 5157512,
        regnetx006: 6196040,
        regnetx008: 7259656,
        regnetx016: 9190136,
        regnetx032: 15296552,
        regnetx040: 22118248,
        regnetx064: 26209256,
        regnetx080: 39572648,
        regnetx120: 46106056,
        regnetx160: 54278536,
        regnetx320: 107811560,
        regnety002: 3162996,
        regnety004: 4344144,
        regnety006: 6055160,
        regnety008: 6263168,
        regnety016: 11202430,
        regnety032: 19436338,
        regnety040: 20646656,
        regnety064: 30583252,
        regnety080: 39180068,
        regnety120: 51822544,
        regnety160: 83590140,
        regnety320: 145046770,
    }

    batch = 14
    size = 224

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(batch, 3, size, size)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (batch, 1000))
| 24,321
| 32.874652
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/icnet.py
|
"""
ICNet for image segmentation, implemented in PyTorch.
Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
"""
__all__ = ['ICNet', 'icnet_resnetd50b_cityscapes']
import os
import torch.nn as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .pspnet import PyramidPooling
from .resnetd import resnetd50b
class ICInitBlock(nn.Module):
    """
    ICNet specific initial block: three stride-2 3x3 convolutions
    (overall 8x spatial downsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ICInitBlock, self).__init__()
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            stride=2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return x
class PSPBlock(nn.Module):
    """
    ICNet specific PSPNet reduced head block: pyramid pooling followed by a
    3x3 convolution and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    bottleneck_factor : int
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor):
        super(PSPBlock, self).__init__()
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        self.pool = PyramidPooling(
            in_channels=in_channels,
            upscale_out_size=upscale_out_size)
        # NOTE(review): 4096 is hard-coded and presumably equals PyramidPooling's
        # concatenated output width for a 2048-channel input — confirm against
        # the PyramidPooling implementation in .pspnet.
        self.conv = conv3x3_block(
            in_channels=4096,
            out_channels=mid_channels)
        self.dropout = nn.Dropout(p=0.1, inplace=False)

    def forward(self, x):
        x = self.pool(x)
        x = self.conv(x)
        return self.dropout(x)
class CFFBlock(nn.Module):
    """
    Cascade Feature Fusion block: upsamples the low-resolution input, fuses it
    with the high-resolution input, and also emits an auxiliary class map
    computed from the (upsampled, convolved) low-resolution branch.

    Parameters:
    ----------
    in_channels_low : int
        Number of input channels (low-resolution input).
    in_channels_high : int
        Number of input channels (high-resolution input).
    out_channels : int
        Number of output channels.
    num_classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels_low,
                 in_channels_high,
                 out_channels,
                 num_classes):
        super(CFFBlock, self).__init__()
        self.up = InterpolationBlock(scale_factor=2)
        self.conv_low = conv3x3_block(
            in_channels=in_channels_low,
            out_channels=out_channels,
            padding=2,
            dilation=2,
            activation=None)
        # NOTE: the misspelled attribute name "conv_hign" is kept on purpose
        # so pretrained state-dict keys still match.
        self.conv_hign = conv1x1_block(
            in_channels=in_channels_high,
            out_channels=out_channels,
            activation=None)
        self.activ = nn.ReLU(inplace=True)
        self.conv_cls = conv1x1(
            in_channels=out_channels,
            out_channels=num_classes)

    def forward(self, xl, xh):
        xl = self.conv_low(self.up(xl))
        xh = self.conv_hign(xh)
        fused = self.activ(xl + xh)
        # Auxiliary classifier reads the low branch *before* fusion.
        aux_cls = self.conv_cls(xl)
        return fused, aux_cls
class ICHeadBlock(nn.Module):
    """
    ICNet head block: cascades the 1/4- and 1/2-resolution fusions and emits
    class maps at four scales.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    """
    def __init__(self,
                 num_classes):
        super(ICHeadBlock, self).__init__()
        self.cff_12 = CFFBlock(
            in_channels_low=128,
            in_channels_high=64,
            out_channels=128,
            num_classes=num_classes)
        self.cff_24 = CFFBlock(
            in_channels_low=256,
            in_channels_high=256,
            out_channels=128,
            num_classes=num_classes)
        self.up_x2 = InterpolationBlock(scale_factor=2)
        # NOTE(review): despite the name, this block upsamples by 4x,
        # matching the original implementation.
        self.up_x8 = InterpolationBlock(scale_factor=4)
        self.conv_cls = conv1x1(
            in_channels=128,
            out_channels=num_classes)

    def forward(self, x1, x2, x4):
        x_cff_24, x_24_cls = self.cff_24(x4, x2)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1)
        up_x2 = self.conv_cls(self.up_x2(x_cff_12))
        up_x8 = self.up_x8(up_x2)
        # Outputs ordered from full resolution down: 1 -> 1/4 -> 1/8 -> 1/16.
        return up_x8, up_x2, x_12_cls, x_24_cls
class ICNet(nn.Module):
    """
    ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
    https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    channels : tuple of int
        Number of output channels for each branch.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    num_classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbones,
                 backbones_out_channels,
                 channels,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 num_classes=21):
        super(ICNet, self).__init__()
        assert (in_channels > 0)
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size

        # Pyramid-pooling output size is only known in advance for fixed input sizes.
        psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
        psp_head_out_channels = 512

        # Full-resolution branch.
        self.branch1 = ICInitBlock(
            in_channels=in_channels,
            out_channels=channels[0])

        # Shared downsampled branch: the first backbone's intermediate output
        # feeds the 1/2-resolution path, the PSP head the 1/4-resolution path.
        self.branch2 = MultiOutputSequential()
        self.branch2.add_module("down1", InterpolationBlock(scale_factor=2, up=False))
        backbones[0].do_output = True
        self.branch2.add_module("backbones1", backbones[0])
        self.branch2.add_module("down2", InterpolationBlock(scale_factor=2, up=False))
        self.branch2.add_module("backbones2", backbones[1])
        self.branch2.add_module("psp", PSPBlock(
            in_channels=backbones_out_channels[1],
            upscale_out_size=psp_pool_out_size,
            bottleneck_factor=4))
        self.branch2.add_module("final_block", conv1x1_block(
            in_channels=psp_head_out_channels,
            out_channels=channels[2]))

        self.conv_y2 = conv1x1_block(
            in_channels=backbones_out_channels[0],
            out_channels=channels[1])

        self.final_block = ICHeadBlock(num_classes=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every convolution; zero its bias if present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        y1 = self.branch1(x)
        y3, y2 = self.branch2(x)
        outs = self.final_block(y1, self.conv_y2(y2), y3)
        # With aux enabled return all scales, otherwise only the full-resolution map.
        return outs if self.aux else outs[0]
def get_icnet(backbones,
              backbones_out_channels,
              num_classes,
              aux=False,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create ICNet model with specific parameters.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    num_classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # ICNet drives the backbones itself; disable their multi-output mode.
    for backbone in backbones:
        backbone.multi_output = False

    net = ICNet(
        backbones=backbones,
        backbones_out_channels=backbones_out_channels,
        channels=(64, 256, 256),
        num_classes=num_classes,
        aux=aux,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def icnet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
    High-Resolution Images,' https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # First backbone: keep only the first three feature modules (stem + early stages).
    backbone1 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    while len(backbone1) > 3:
        del backbone1[-1]
    # Second backbone: drop the final pooling and the first three modules (late stages only).
    backbone2 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    del backbone2[-1]
    for _ in range(3):
        del backbone2[0]
    return get_icnet(backbones=(backbone1, backbone2), backbones_out_channels=(512, 2048),
                     num_classes=num_classes, aux=aux, model_name="icnet_resnetd50b_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test: parameter count and output shape of the ICNet models."""
    import torch

    in_size = (480, 480)
    aux = False
    pretrained = False

    configs = [
        (icnet_resnetd50b_cityscapes, 19, 47489184),
    ]
    for model, num_classes, expected_count in configs:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=False, aux=aux)
        net.eval()
        count = _calc_width(net)
        print(f"m={model.__name__}, {count}")
        assert count == expected_count
        x = torch.randn(1, 3, *in_size)
        outs = net(x)
        y = outs[0] if aux else outs
        y.sum().backward()
        assert tuple(y.size()) == (x.size(0), num_classes, x.size(2), x.size(3))


if __name__ == "__main__":
    _test()
| 12,295
| 29.894472
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/mobilenetb.py
|
"""
MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4']
from .mobilenet import get_mobilenet
def mobilenetb_w1(**kwargs):
    """
    1.0 MobileNet(B)-224 model (simplified depthwise separable convolution block) from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(model_name="mobilenetb_w1", width_scale=1.0, dws_simplified=True, **kwargs)
def mobilenetb_w3d4(**kwargs):
    """
    0.75 MobileNet(B)-224 model (simplified depthwise separable convolution block) from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(model_name="mobilenetb_w3d4", width_scale=0.75, dws_simplified=True, **kwargs)
def mobilenetb_wd2(**kwargs):
    """
    0.5 MobileNet(B)-224 model (simplified depthwise separable convolution block) from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(model_name="mobilenetb_wd2", width_scale=0.5, dws_simplified=True, **kwargs)
def mobilenetb_wd4(**kwargs):
    """
    0.25 MobileNet(B)-224 model (simplified depthwise separable convolution block) from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(model_name="mobilenetb_wd4", width_scale=0.25, dws_simplified=True, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test: parameter counts and output shapes of the MobileNet(B) variants."""
    import torch

    pretrained = False
    expected = {
        mobilenetb_w1: 4222056,
        mobilenetb_w3d4: 2578120,
        mobilenetb_wd2: 1326632,
        mobilenetb_wd4: 467592,
    }
    for model, expected_count in expected.items():
        net = model(pretrained=pretrained)
        net.eval()
        count = _calc_width(net)
        print(f"m={model.__name__}, {count}")
        assert count == expected_count
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)


if __name__ == "__main__":
    _test()
| 3,794
| 32.289474
| 113
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/shakedropresnet_cifar.py
|
"""
ShakeDrop-ResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(torch.autograd.Function):
    """
    ShakeDrop custom autograd function.

    Forward scales the input by ``b + alpha - b * alpha`` (equal to 1 when the
    gate ``b`` is 1, ``alpha`` when it is 0). Backward re-draws an independent
    per-sample coefficient ``beta`` in place of ``alpha``.
    """
    @staticmethod
    def forward(ctx, x, b, alpha):
        # Only the Bernoulli gate is needed again in backward.
        ctx.save_for_backward(b)
        return (b + alpha - b * alpha) * x
    @staticmethod
    def backward(ctx, dy):
        b, = ctx.saved_tensors
        beta = torch.rand(dy.size(0), dtype=dy.dtype, device=dy.device).view(-1, 1, 1, 1)
        # No gradients flow to the random inputs b and alpha.
        return (b + beta - b * beta) * dy, None, None
class ShakeDropResUnit(nn.Module):
    """
    ShakeDrop-ResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 life_prob):
        super(ShakeDropResUnit, self).__init__()
        self.life_prob = life_prob
        # A projection is needed whenever the identity branch would not match
        # the body branch in channel count or spatial size.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        body_class = ResBottleneck if bottleneck else ResBlock
        self.body = body_class(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        if self.resize_identity:
            # 1x1 projection without activation for the identity branch.
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)
        self.shake_drop = ShakeDrop.apply
    def forward(self, x):
        # Residual unit: randomly gated/scaled body branch + identity, then ReLU.
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if self.training:
            # b ~ Bernoulli(life_prob) gates the residual branch;
            # alpha ~ U(-1, 1) is a per-sample random scale used when the gate is off.
            b = torch.bernoulli(torch.full((1,), self.life_prob, dtype=x.dtype, device=x.device))
            alpha = torch.empty(x.size(0), dtype=x.dtype, device=x.device).view(-1, 1, 1, 1).uniform_(-1.0, 1.0)
            x = self.shake_drop(x, b, alpha)
        else:
            # Inference uses the expected coefficient (E[alpha] = 0, so it is life_prob).
            x = self.life_prob * x
        x = x + identity
        x = self.activ(x)
        return x
class CIFARShakeDropResNet(nn.Module):
    """
    ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARShakeDropResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        # k is the global unit index across all stages, used to pick life_probs[k].
        k = 0
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), ShakeDropResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    life_prob=life_probs[k]))
                in_channels = out_channels
                k += 1
            self.features.add_module("stage{}".format(i + 1), stage)
        # 8x8 pooling: the 32x32 input is downscaled twice (stages 2 and 3).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # He-uniform initialization for all convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        # Features -> flatten -> linear classifier.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_shakedropresnet_cifar(classes,
                              blocks,
                              bottleneck,
                              model_name=None,
                              pretrained=False,
                              root=os.path.join("~", ".torch", "models"),
                              **kwargs):
    """
    Create ShakeDrop-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # A bottleneck unit accounts for 9 layers of depth, a simple unit for 6;
    # the units are spread evenly over 3 stages.
    depth_per_unit = 9 if bottleneck else 6
    assert ((blocks - 2) % depth_per_unit == 0)
    layers = [(blocks - 2) // depth_per_unit] * 3
    init_block_channels = 16
    width = 4 if bottleneck else 1
    channels = [[ci * width] * li for (ci, li) in zip([16, 32, 64], layers)]
    # Survival probabilities decay linearly with depth down to 1 - final_death_prob.
    total_layers = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)]
    net = CIFARShakeDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        num_classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(model_name="shakedropresnet20_cifar10", classes=classes,
                                     blocks=20, bottleneck=False, **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(model_name="shakedropresnet20_cifar100", classes=classes,
                                     blocks=20, bottleneck=False, **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(model_name="shakedropresnet20_svhn", classes=classes,
                                     blocks=20, bottleneck=False, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test: parameter counts and output shapes of the ShakeDrop-ResNet models."""
    import torch

    pretrained = False
    configs = [
        (shakedropresnet20_cifar10, 10, 272474),
        (shakedropresnet20_cifar100, 100, 278324),
        (shakedropresnet20_svhn, 10, 272474),
    ]
    for model, num_classes, expected_count in configs:
        net = model(pretrained=pretrained)
        net.eval()
        count = _calc_width(net)
        print(f"m={model.__name__}, {count}")
        assert count == expected_count
        x = torch.randn(14, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (14, num_classes)


if __name__ == "__main__":
    _test()
| 10,750
| 31.677812
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/inceptionresnetv1.py
|
"""
InceptionResNetV1 for ImageNet-1K, implemented in PyTorch.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
import torch.nn as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-A unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(InceptionAUnit, self).__init__()
        # Fixed residual-branch scaling factor for Inception-A units.
        self.scale = 0.17
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:3],
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[3:6],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps))
        # 1x1 conv maps the concatenated branch outputs back to in_channels
        # so the residual addition below is well-defined.
        conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            bias=True)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        # Residual unit: x + scale * conv(branches(x)), then ReLU.
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionBUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(InceptionBUnit, self).__init__()
        # Fixed residual-branch scaling factor for Inception-B units.
        self.scale = 0.10
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps))
        # Factorized 7x7 convolution: 1x1 -> 1x7 -> 7x1.
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps))
        # 1x1 conv restores in_channels for the residual addition.
        conv_in_channels = out_channels_list[0] + out_channels_list[3]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            bias=True)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        # Residual unit: x + scale * conv(branches(x)), then ReLU.
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionCUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-C unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 scale=0.2,
                 activate=True):
        super(InceptionCUnit, self).__init__()
        # scale/activate are overridable here (unlike A/B units) because the
        # network's very last unit uses scale=1.0 and no activation.
        self.activate = activate
        self.scale = scale
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps))
        # Factorized 3x3 convolution: 1x1 -> 1x3 -> 3x1.
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, (1, 3), (3, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 1), (1, 0)),
            bn_eps=bn_eps))
        # 1x1 conv restores in_channels for the residual addition.
        conv_in_channels = out_channels_list[0] + out_channels_list[3]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            bias=True)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        # Residual unit: x + scale * conv(branches(x)), with optional ReLU.
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x
class ReductionAUnit(nn.Module):
    """
    InceptionResNetV1 type Reduction-A unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(ReductionAUnit, self).__init__()
        # Three parallel stride-2 branches whose outputs are concatenated;
        # no residual connection (spatial size changes).
        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[0:1],
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", MaxPoolBranch())
    def forward(self, x):
        x = self.branches(x)
        return x
class ReductionBUnit(nn.Module):
    """
    InceptionResNetV1 type Reduction-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(ReductionBUnit, self).__init__()
        # Four parallel stride-2 branches whose outputs are concatenated;
        # no residual connection (spatial size changes).
        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[0:2],
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[2:4],
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[4:7],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch4", MaxPoolBranch())
    def forward(self, x):
        x = self.branches(x)
        return x
class InceptInitBlock(nn.Module):
    """
    InceptionResNetV1 specific initial block (stem): a chain of convolutions
    with three stride-2 reductions that takes the input from `in_channels`
    up to 256 channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(InceptInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            stride=2,
            padding=0,
            bn_eps=bn_eps)
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            stride=1,
            padding=1,
            bn_eps=bn_eps)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=0)
        self.conv4 = conv1x1_block(
            in_channels=64,
            out_channels=80,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv5 = conv3x3_block(
            in_channels=80,
            out_channels=192,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv6 = conv3x3_block(
            in_channels=192,
            out_channels=256,
            stride=2,
            padding=0,
            bn_eps=bn_eps)
    def forward(self, x):
        # Sequential stem: conv x3 -> max-pool -> conv x3.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        return x
class InceptHead(nn.Module):
"""
InceptionResNetV1 specific classification block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_eps : float
Small float added to variance in Batch norm.
dropout_rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
num_classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
bn_eps,
dropout_rate,
num_classes):
super(InceptHead, self).__init__()
self.use_dropout = (dropout_rate != 0.0)
if self.use_dropout:
self.dropout = nn.Dropout(p=dropout_rate)
self.fc1 = nn.Linear(
in_features=in_channels,
out_features=512,
bias=False)
self.bn = nn.BatchNorm1d(
num_features=512,
eps=bn_eps)
self.fc2 = nn.Linear(
in_features=512,
out_features=num_classes)
def forward(self, x):
if self.use_dropout:
x = self.dropout(x)
x = self.fc1(x)
x = self.bn(x)
x = self.fc2(x)
return x
class InceptionResNetV1(nn.Module):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_prob : float, default 0.6
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_prob=0.6,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 num_classes=1000):
        super(InceptionResNetV1, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        layers = [5, 11, 7]
        in_channels_list = [256, 896, 1792]
        normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
        reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        self.features = nn.Sequential()
        self.features.add_module("init_block", InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps))
        in_channels = in_channels_list[0]
        for i, layers_per_stage in enumerate(layers):
            stage = nn.Sequential()
            for j in range(layers_per_stage):
                # The first unit of each non-first stage is a reduction unit.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                    out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                else:
                    unit = normal_units[i]
                    out_channels_list_per_stage = normal_out_channels_list[i]
                # The very last unit keeps scale 1.0 and no activation.
                if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                    unit_kwargs = {"scale": 1.0, "activate": False}
                else:
                    unit_kwargs = {}
                stage.add_module("unit{}".format(j + 1), unit(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list_per_stage,
                    bn_eps=bn_eps,
                    **unit_kwargs))
                if (j == 0) and (i != 0):
                    in_channels = in_channels_list[i]
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = InceptHead(
            in_channels=in_channels,
            bn_eps=bn_eps,
            dropout_rate=dropout_prob,
            num_classes=num_classes)
        self._init_params()
    def _init_params(self):
        """
        Initialize all convolution weights (He-uniform) and zero their biases.
        """
        # Bug fix: `named_modules()` yields (name, module) pairs. The previous code
        # iterated the pairs directly, so `isinstance(pair, nn.Conv2d)` was always
        # False and the initialization never ran.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        """
        Extract features, flatten, and classify.
        """
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_inceptionresnetv1(model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".torch", "models"),
                          **kwargs):
    """
    Create InceptionResNetV1 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV1(**kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def inceptionresnetv1(**kwargs):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_inceptionresnetv1(bn_eps=1e-3, model_name="inceptionresnetv1", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test: parameter count and output shape of InceptionResNetV1."""
    import torch

    pretrained = False
    configs = [
        (inceptionresnetv1, 23995624),
    ]
    for model, expected_count in configs:
        net = model(pretrained=pretrained)
        net.eval()
        count = _calc_width(net)
        print(f"m={model.__name__}, {count}")
        assert count == expected_count
        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)


if __name__ == "__main__":
    _test()
| 16,987
| 30.285451
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/scnet.py
|
"""
SCNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
"""
__all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, InterpolationBlock
from .resnet import ResInitBlock
from .senet import SEInitBlock
from .resnesta import ResNeStADownBlock
class ScDownBlock(nn.Module):
    """
    SCNet specific convolutional downscale block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_size: int or list/tuple of 2 ints, default 2
        Size of the average pooling windows.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_size=2):
        super(ScDownBlock, self).__init__()
        self.pool = nn.AvgPool2d(
            kernel_size=pool_size,
            stride=pool_size)
        # No activation: ScConv feeds this block's output into a sigmoid gate.
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None)
    def forward(self, x):
        # Average-pool to downscale, then convolve.
        x = self.pool(x)
        x = self.conv(x)
        return x
class ScConv(nn.Module):
    """
    Self-calibrated convolutional block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    scale_factor : int
        Scale factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 scale_factor):
        super(ScConv, self).__init__()
        # Down/up pair computes a lower-resolution view used for calibration.
        self.down = ScDownBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            pool_size=scale_factor)
        self.up = InterpolationBlock(
            scale_factor=scale_factor,
            mode="nearest",
            align_corners=None)
        self.sigmoid = nn.Sigmoid()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            activation=None)
        self.conv2 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
    def forward(self, x):
        # Calibration weights: sigmoid of x plus its downscaled-then-upsampled view
        # (upsampled explicitly to x's spatial size to handle odd dimensions).
        w = self.sigmoid(x + self.up(self.down(x), size=x.shape[2:]))
        # Gate the convolved features, then apply the (possibly strided) output conv.
        x = self.conv1(x) * w
        x = self.conv2(x)
        return x
class ScBottleneck(nn.Module):
    """
    SCNet specific bottleneck block for residual path in SCNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    scale_factor : int, default 4
        Scale factor.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck_factor=4,
                 scale_factor=4,
                 avg_downsample=False):
        super(ScBottleneck, self).__init__()
        # When avg_downsample is set, the stride is deferred from the convolutions
        # to an average-pooling step after both branches.
        self.avg_resize = (stride > 1) and avg_downsample
        # Two parallel branches each carry half the bottleneck width.
        mid_channels = out_channels // bottleneck_factor // 2
        self.conv1a = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2a = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=(1 if self.avg_resize else stride))
        self.conv1b = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        # Branch "b" uses the self-calibrated convolution.
        self.conv2b = ScConv(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=(1 if self.avg_resize else stride),
            scale_factor=scale_factor)
        if self.avg_resize:
            self.pool = nn.AvgPool2d(
                kernel_size=3,
                stride=stride,
                padding=1)
        # Concatenated branches are projected to out_channels without activation.
        self.conv3 = conv1x1_block(
            in_channels=(2 * mid_channels),
            out_channels=out_channels,
            activation=None)
    def forward(self, x):
        # Plain branch.
        y = self.conv1a(x)
        y = self.conv2a(y)
        # Self-calibrated branch.
        z = self.conv1b(x)
        z = self.conv2b(z)
        if self.avg_resize:
            y = self.pool(y)
            z = self.pool(z)
        x = torch.cat((y, z), dim=1)
        x = self.conv3(x)
        return x
class ScUnit(nn.Module):
    """
    SCNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 avg_downsample=False):
        super(ScUnit, self).__init__()
        # A projection is needed whenever the identity branch would not match
        # the body branch in channel count or spatial size.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ScBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            avg_downsample=avg_downsample)
        if self.resize_identity:
            if avg_downsample:
                # Pool-then-project identity branch (ResNeSt-A style).
                self.identity_block = ResNeStADownBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride)
            else:
                # Strided 1x1 projection without activation.
                self.identity_block = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        # Residual unit: body + (possibly projected) identity, then ReLU.
        if self.resize_identity:
            identity = self.identity_block(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x
class SCNet(nn.Module):
    """
    SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 se_init_block=False,
                 avg_downsample=False,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SCNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        init_block_class = SEInitBlock if se_init_block else ResInitBlock
        self.features.add_module("init_block", init_block_class(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), ScUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=unit_stride,
                    avg_downsample=avg_downsample))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every convolution weight; zero the biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_scnet(blocks,
              width_scale=1.0,
              se_init_block=False,
              avg_downsample=False,
              init_block_channels_scale=1,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create SCNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    init_block_channels_scale : int, default 1
        Scale factor for number of output channels in the initial unit.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-depth layout of units over the four stages.
    blocks_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    # Sanity check: 3 conv layers per bottleneck unit plus stem and classifier.
    assert (sum(layers) * 3 + 2 == blocks)

    init_block_channels = 64 * init_block_channels_scale
    bottleneck_factor = 4
    channels_per_layers = [ci * bottleneck_factor for ci in [64, 128, 256, 512]]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage,
        # which keeps its original channel count.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = SCNet(
        channels=channels,
        init_block_channels=init_block_channels,
        se_init_block=se_init_block,
        avg_downsample=avg_downsample,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def scnet50(**kwargs):
    """
    SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_scnet(model_name="scnet50", blocks=50, **kwargs)


def scnet101(**kwargs):
    """
    SCNet-101 model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_scnet(model_name="scnet101", blocks=101, **kwargs)


def scneta50(**kwargs):
    """
    SCNet(A)-50 model (average-downsampling variant) from 'Improving Convolutional
    Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_scnet(model_name="scneta50", blocks=50, se_init_block=True,
                     avg_downsample=True, **kwargs)


def scneta101(**kwargs):
    """
    SCNet(A)-101 model (average-downsampling variant) from 'Improving Convolutional
    Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_scnet(model_name="scneta101", blocks=101, se_init_block=True,
                     avg_downsample=True, init_block_channels_scale=2, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all SCNet variants: parameter counts and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        scnet50: 25564584,
        scnet101: 44565416,
        scneta50: 25583816,
        scneta101: 44689192,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 14,943
| 29.876033
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/igcv3.py
|
"""
IGCV3 for ImageNet-1K, implemented in PyTorch.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle
class InvResUnit(nn.Module):
    """
    So-called 'Inverted Residual Unit' layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    expansion : bool
        Whether do expansion of channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expansion):
        super(InvResUnit, self).__init__()
        # The residual shortcut is usable only when the unit keeps the
        # tensor shape unchanged.
        self.residual = (in_channels == out_channels) and (stride == 1)
        mid_channels = in_channels * 6 if expansion else in_channels
        groups = 2

        # Grouped pointwise expansion followed by a channel shuffle so the
        # depthwise stage below mixes information across both groups.
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=groups,
            activation=None)
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups)
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            activation="relu6")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups,
            activation=None)

    def forward(self, x):
        out = self.conv1(x)
        out = self.c_shuffle(out)
        out = self.conv2(out)
        out = self.conv3(out)
        return x + out if self.residual else out
class IGCV3(nn.Module):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(IGCV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            activation="relu6"))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first;
                # the very first unit of the network skips channel expansion.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                expansion = (stage_idx != 0) or (unit_idx != 0)
                stage.add_module("unit{}".format(unit_idx + 1), InvResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=unit_stride,
                    expansion=expansion))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            activation="relu6"))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every convolution weight; zero the biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_igcv3(width_scale,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create IGCV3-D model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

    # Group consecutive layer specs into stages: a non-zero downsample flag
    # opens a new stage, otherwise the units extend the current stage.
    channels = [[]]
    for ci, li, ds in zip(channels_per_layers, layers, downsample):
        if ds != 0:
            channels.append([ci] * li)
        else:
            channels[-1] += [ci] * li

    if width_scale != 1.0:
        def make_even(x):
            # Channel counts must stay even for the two-group convolutions.
            return x if (x % 2 == 0) else x + 1
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))

    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def igcv3_w1(**kwargs):
    """
    IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for
    Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(model_name="igcv3_w1", width_scale=1.0, **kwargs)


def igcv3_w3d4(**kwargs):
    """
    IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for
    Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(model_name="igcv3_w3d4", width_scale=0.75, **kwargs)


def igcv3_wd2(**kwargs):
    """
    IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for
    Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(model_name="igcv3_wd2", width_scale=0.5, **kwargs)


def igcv3_wd4(**kwargs):
    """
    IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for
    Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(model_name="igcv3_wd4", width_scale=0.25, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all IGCV3 variants: parameter counts and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        igcv3_w1: 3491688,
        igcv3_w3d4: 2638084,
        igcv3_wd2: 1985528,
        igcv3_wd4: 1534020,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 9,829
| 30.709677
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/seresnet_cifar.py
|
"""
SE-ResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn',
'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn',
'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn',
'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn',
'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn',
'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn',
'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn',
'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block
from .seresnet import SEResUnit
class CIFARSEResNet(nn.Module):
    """
    SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification num_classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARSEResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), SEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=unit_stride,
                    bottleneck=bottleneck,
                    conv1_stride=False))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        # 8x8 pooling collapses the final CIFAR feature map to 1x1.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for every convolution weight; zero the biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_seresnet_cifar(num_classes,
                       blocks,
                       bottleneck,
                       model_name=None,
                       pretrained=False,
                       root=os.path.join("~", ".torch", "models"),
                       **kwargs):
    """
    Create SE-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification num_classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])

    # Each of the three stages contributes (blocks - 2) / divisor units:
    # 9 conv layers per bottleneck-unit triple, 6 per simple-unit triple.
    units_div = 9 if bottleneck else 6
    assert ((blocks - 2) % units_div == 0)
    layers = [(blocks - 2) // units_div] * 3

    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def seresnet20_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet20_cifar10", **kwargs)


def seresnet20_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet20_cifar100", **kwargs)


def seresnet20_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet20_svhn", **kwargs)
def seresnet56_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet56_cifar10", **kwargs)


def seresnet56_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet56_cifar100", **kwargs)


def seresnet56_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet56_svhn", **kwargs)
def seresnet110_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet110_cifar10", **kwargs)


def seresnet110_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet110_cifar100", **kwargs)


def seresnet110_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, num_classes=num_classes,
                              model_name="seresnet110_svhn", **kwargs)
def seresnet164bn_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet164bn_cifar10", **kwargs)


def seresnet164bn_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet164bn_cifar100", **kwargs)


def seresnet164bn_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet164bn_svhn", **kwargs)
def seresnet272bn_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet272bn_cifar10", **kwargs)


def seresnet272bn_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet272bn_cifar100", **kwargs)


def seresnet272bn_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet272bn_svhn", **kwargs)
def seresnet542bn_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet542bn_cifar10", **kwargs)


def seresnet542bn_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet542bn_cifar100", **kwargs)


def seresnet542bn_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet542bn_svhn", **kwargs)
def seresnet1001_cifar10(num_classes=10, **kwargs):
    """
    SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet1001_cifar10", **kwargs)


def seresnet1001_cifar100(num_classes=100, **kwargs):
    """
    SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet1001_cifar100", **kwargs)


def seresnet1001_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification num_classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, num_classes=num_classes,
                              model_name="seresnet1001_svhn", **kwargs)
def seresnet1202_cifar10(num_classes=10, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
num_classes : int, default 10
Number of classification num_classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar10",
**kwargs)
def seresnet1202_cifar100(num_classes=100, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
num_classes : int, default 100
Number of classification num_classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False,
model_name="seresnet1202_cifar100", **kwargs)
def seresnet1202_svhn(num_classes=10, **kwargs):
    """
    SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="seresnet1202_svhn",
                              **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build each SE-ResNet CIFAR/SVHN variant, check its parameter
    count and output shape, and run one backward pass."""
    import torch

    pretrained = False

    # (constructor, number of classes, expected trainable parameter count)
    configs = [
        (seresnet20_cifar10, 10, 274847),
        (seresnet20_cifar100, 100, 280697),
        (seresnet20_svhn, 10, 274847),
        (seresnet56_cifar10, 10, 862889),
        (seresnet56_cifar100, 100, 868739),
        (seresnet56_svhn, 10, 862889),
        (seresnet110_cifar10, 10, 1744952),
        (seresnet110_cifar100, 100, 1750802),
        (seresnet110_svhn, 10, 1744952),
        (seresnet164bn_cifar10, 10, 1906258),
        (seresnet164bn_cifar100, 100, 1929388),
        (seresnet164bn_svhn, 10, 1906258),
        (seresnet272bn_cifar10, 10, 3153826),
        (seresnet272bn_cifar100, 100, 3176956),
        (seresnet272bn_svhn, 10, 3153826),
        (seresnet542bn_cifar10, 10, 6272746),
        (seresnet542bn_cifar100, 100, 6295876),
        (seresnet542bn_svhn, 10, 6272746),
        (seresnet1001_cifar10, 10, 11574910),
        (seresnet1001_cifar100, 100, 11598040),
        (seresnet1001_svhn, 10, 11574910),
        (seresnet1202_cifar10, 10, 19582226),
        (seresnet1202_cifar100, 100, 19588076),
        (seresnet1202_svhn, 10, 19582226),
    ]

    for model, classes, expected_count in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count

        # One forward/backward pass on a CIFAR-sized input.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, classes)
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 24,036
| 36.324534
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resnetd.py
|
"""
ResNet(D) with dilation for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import MultiOutputSequential
from .resnet import ResUnit, ResInitBlock
from .senet import SEInitBlock
class ResNetD(nn.Module):
    """
    ResNet(D) with dilation model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    ordinary_init : bool, default False
        Whether to use original initial block or SENet one.
    bends : tuple of int, default None
        Numbers of bends for multiple output.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 ordinary_init=False,
                 bends=None,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNetD, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.multi_output = (bends is not None)

        self.features = MultiOutputSequential()
        if ordinary_init:
            init_block = ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels)
        else:
            # The SENet-style stem is wider than the plain ResNet one.
            init_block_channels *= 2
            init_block = SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels)
        self.features.add_module("init_block", init_block)
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                first_unit = (unit_idx == 0)
                # Spatial downsampling happens only at the start of stage 2;
                # later stages keep resolution and grow the dilation instead.
                stride = 2 if (first_unit and (stage_idx == 1)) else 1
                dilation = 2 ** max(0, stage_idx - 1 - int(first_unit))
                stage.add_module("unit{}".format(unit_idx + 1), ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            if self.multi_output and ((stage_idx + 1) in bends):
                # Mark this stage so MultiOutputSequential also returns its output.
                stage.do_output = True
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        """Kaiming-uniform initialization for all convolution weights, zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        outs = self.features(x)
        x = outs[0]
        x = x.view(x.size(0), -1)
        x = self.output(x)
        # With bends enabled, also return the intermediate stage outputs.
        return ([x] + outs[1:]) if self.multi_output else x
def get_resnetd(blocks,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create ResNet(D) with dilation model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Units per stage for every supported depth.
    layers_by_depth = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        14: [2, 2, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in layers_by_depth:
        raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks))
    layers = layers_by_depth[blocks]

    init_block_channels = 64
    # Depths of 50+ use bottleneck units (4x wider stage outputs).
    bottleneck = (blocks >= 50)
    channels_per_layers = [256, 512, 1024, 2048] if bottleneck else [64, 128, 256, 512]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last one of the network.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNetD(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resnetd50b(**kwargs):
    """
    ResNet(D)-50 with dilation, using the stride in the second convolution of the bottleneck block, from 'Deep
    Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(model_name="resnetd50b", blocks=50, conv1_stride=False, **kwargs)
def resnetd101b(**kwargs):
    """
    ResNet(D)-101 with dilation, using the stride in the second convolution of the bottleneck block, from 'Deep
    Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(model_name="resnetd101b", blocks=101, conv1_stride=False, **kwargs)
def resnetd152b(**kwargs):
    """
    ResNet(D)-152 with dilation, using the stride in the second convolution of the bottleneck block, from 'Deep
    Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(model_name="resnetd152b", blocks=152, conv1_stride=False, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build each ResNet(D) variant, check its parameter count and
    output shape, and run one backward pass."""
    import torch

    ordinary_init = False
    bends = None
    pretrained = False

    # constructor -> (expected count with ordinary init, expected count with SE init)
    expected_counts = {
        resnetd50b: (25557032, 25680808),
        resnetd101b: (44549160, 44672936),
        resnetd152b: (60192808, 60316584),
    }

    for model, (ordinary_count, se_count) in expected_counts.items():
        net = model(
            pretrained=pretrained,
            ordinary_init=ordinary_init,
            bends=bends)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == (ordinary_count if ordinary_init else se_count)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        if bends is not None:
            # Multi-output mode: the classification logits come first.
            y = y[0]
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 9,674
| 32.362069
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/quartznet.py
|
"""
QuartzNet for ASR, implemented in PyTorch.
Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
https://arxiv.org/abs/1910.10261.
"""
__all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de',
'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru',
'quartznet15x5_ru34']
from .jasper import get_jasper
def quartznet5x5_en_ls(num_classes=29, **kwargs):
    """
    QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet and apostrophe.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(model_name="quartznet5x5_en_ls", version=("quartznet", "5x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_en(num_classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet and apostrophe.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(model_name="quartznet15x5_en", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_en_nr(num_classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet and apostrophe.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(model_name="quartznet15x5_en_nr", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_fr(num_classes=43, **kwargs):
    """
    QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 43
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet, apostrophe and French accented letters.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'çéâêîôûàèùëïüÿ")
    return get_jasper(model_name="quartznet15x5_fr", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_de(num_classes=32, **kwargs):
    """
    QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 32
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet, umlauts and sharp s.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyzäöüß")
    return get_jasper(model_name="quartznet15x5_de", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_it(num_classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet, apostrophe and Italian accented letters.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'àéèíìîóòúù")
    return get_jasper(model_name="quartznet15x5_it", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_es(num_classes=36, **kwargs):
    """
    QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 36
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space, Latin alphabet, apostrophe and Spanish accented letters.
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'áéíóúñü")
    return get_jasper(model_name="quartznet15x5_es", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_ca(num_classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Catalan language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Catalan graphemes (note 'ŀ' for the geminated l), space and apostrophe.
    vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                  't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
    return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ca", **kwargs)
def quartznet15x5_pl(num_classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Polish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Polish graphemes (Latin letters with Polish diacritics) plus space.
    vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
                  'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
    return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_pl", **kwargs)
def quartznet15x5_ru(num_classes=35, **kwargs):
    """
    QuartzNet 15x5 model for Russian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 35
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space and the full Cyrillic alphabet (including 'ё').
    vocabulary = list(" абвгдеёжзийклмнопрстуфхцчшщъыьэюя")
    return get_jasper(model_name="quartznet15x5_ru", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def quartznet15x5_ru34(num_classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Russian language (32 graphemes) from 'QuartzNet: Deep Automatic Speech Recognition with 1D
    Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    num_classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Space and the Cyrillic alphabet without 'ё'.
    vocabulary = list(" абвгдежзийклмнопрстуфхцчшщъыьэюя")
    return get_jasper(model_name="quartznet15x5_ru34", version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, num_classes=num_classes, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build each QuartzNet variant, check its parameter count and
    output shape on a random variable-length batch."""
    import numpy as np
    import torch

    pretrained = False
    from_audio = False
    audio_features = 64
    # Use CUDA only when it is actually available, so the test also runs on
    # CPU-only machines (the previous unconditional `use_cuda = True` crashed there).
    use_cuda = torch.cuda.is_available()

    # Model constructor -> expected number of trainable parameters.
    expected_counts = {
        quartznet5x5_en_ls: 6713181,
        quartznet15x5_en: 18924381,
        quartznet15x5_en_nr: 18924381,
        quartznet15x5_fr: 18938731,
        quartznet15x5_de: 18927456,
        quartznet15x5_it: 18934631,
        quartznet15x5_es: 18931556,
        quartznet15x5_ca: 18934631,
        quartznet15x5_pl: 18929506,
        quartznet15x5_ru: 18930531,
        quartznet15x5_ru34: 18929506,
    }

    for model, expected in expected_counts.items():
        net = model(
            in_channels=audio_features,
            from_audio=from_audio,
            pretrained=pretrained)
        if use_cuda:
            net = net.cuda()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        # Random batch of variable-length sequences (raw audio is ~640x longer).
        batch = 3
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
        x = torch.randn(x_shape)
        x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device)
        if use_cuda:
            x = x.cuda()
            x_len = x_len.cuda()

        y, y_len = net(x, x_len)
        assert (tuple(y.size())[:2] == (batch, net.num_classes))
        if from_audio:
            assert (y.size()[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.size()[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 13,675
| 42.141956
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/preresnet.py
|
"""
PreResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck',
'PreResUnit', 'PreResInitBlock', 'PreResActivation']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1
class PreResBlock(nn.Module):
    """
    Simple PreResNet block for residual path in PreResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bias=False,
                 use_bn=True):
        super(PreResBlock, self).__init__()
        # The first conv also hands back its pre-activation tensor, which the
        # enclosing unit uses for the projection shortcut.
        self.conv1 = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bias=bias,
            use_bn=use_bn,
            return_preact=True)
        self.conv2 = pre_conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn)

    def forward(self, x):
        x, preact = self.conv1(x)
        return self.conv2(x), preact
class PreResBottleneck(nn.Module):
    """
    PreResNet bottleneck block for residual path in PreResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 conv1_stride):
        super(PreResBottleneck, self).__init__()
        mid_channels = out_channels // 4
        # The stride goes either into the first 1x1 conv or the middle 3x3 conv.
        stride1, stride2 = (stride, 1) if conv1_stride else (1, stride)

        # conv1 also returns its pre-activation for the shortcut projection.
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride1,
            return_preact=True)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride2)
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        x, preact = self.conv1(x)
        x = self.conv2(x)
        return self.conv3(x), preact
class PreResUnit(nn.Module):
    """
    PreResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bias=False,
                 use_bn=True,
                 bottleneck=True,
                 conv1_stride=False):
        super(PreResUnit, self).__init__()
        # A 1x1 projection is needed whenever the shape of the identity changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        if bottleneck:
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                conv1_stride=conv1_stride)
        else:
            self.body = PreResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=bias,
                use_bn=use_bn)
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=bias)

    def forward(self, x):
        identity = x
        x, preact = self.body(x)
        if self.resize_identity:
            # Project the shortcut from the pre-activation of the body.
            identity = self.identity_conv(preact)
        return x + identity
class PreResInitBlock(nn.Module):
    """
    PreResNet specific initial block: 7x7/2 convolution, BN+ReLU and 3x3/2 max-pooling
    (overall 4x spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(PreResInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        for layer in (self.conv, self.bn, self.activ, self.pool):
            x = layer(x)
        return x
class PreResActivation(nn.Module):
    """
    PreResNet pure pre-activation block (BN + ReLU) without convolution layer.
    It's used by itself as the final block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(PreResActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class PreResNet(nn.Module):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(PreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        # Final BN+ReLU, since pre-activation units leave the last output unactivated.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        """Kaiming-uniform initialization for all convolution weights, zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths 14, 26 and 38 have bottleneck-dependent layouts; the rest are fixed.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    else:
        layers_by_depth = {
            10: [1, 1, 1, 1],
            12: [2, 1, 1, 1],
            16: [2, 2, 2, 1],
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
            269: [3, 30, 48, 8],
        }
        if blocks not in layers_by_depth:
            raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))
        layers = layers_by_depth[blocks]

    # Sanity check: the layer counts must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [4 * ci for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last one of the network.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def preresnet10(**kwargs):
    """
    Construct the experimental PreResNet-10 model described in 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
def preresnet12(**kwargs):
    """
    Construct the experimental PreResNet-12 model described in 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
def preresnet14(**kwargs):
    """
    Construct the experimental PreResNet-14 model described in 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
def preresnetbc14b(**kwargs):
    """
    Construct the experimental PreResNet-BC-14b model (bottleneck compressed, stride on the second
    bottleneck convolution) described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
def preresnet16(**kwargs):
    """
    Construct the experimental PreResNet-16 model described in 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
def preresnet18_wd4(**kwargs):
    """
    Construct the experimental PreResNet-18 model with layer widths scaled by 0.25, following
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs)
def preresnet18_wd2(**kwargs):
    """
    Construct the experimental PreResNet-18 model with layer widths scaled by 0.5, following
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs)
def preresnet18_w3d4(**kwargs):
    """
    Construct the experimental PreResNet-18 model with layer widths scaled by 0.75, following
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs)
def preresnet18(**kwargs):
    """
    Construct the PreResNet-18 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
def preresnet26(**kwargs):
    """
    Construct the experimental PreResNet-26 model (plain, non-bottleneck units) described in
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
def preresnetbc26b(**kwargs):
    """
    Construct the experimental PreResNet-BC-26b model (bottleneck compressed, stride on the second
    bottleneck convolution) described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
def preresnet34(**kwargs):
    """
    Construct the PreResNet-34 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=34, model_name="preresnet34", **kwargs)
def preresnetbc38b(**kwargs):
    """
    Construct the experimental PreResNet-BC-38b model (bottleneck compressed, stride on the second
    bottleneck convolution) described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
def preresnet50(**kwargs):
    """
    Construct the PreResNet-50 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=50, model_name="preresnet50", **kwargs)
def preresnet50b(**kwargs):
    """
    Construct the PreResNet-50 variant that applies the stride at the second convolution of each
    bottleneck block, following 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs)
def preresnet101(**kwargs):
    """
    Construct the PreResNet-101 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=101, model_name="preresnet101", **kwargs)
def preresnet101b(**kwargs):
    """
    Construct the PreResNet-101 variant that applies the stride at the second convolution of each
    bottleneck block, following 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs)
def preresnet152(**kwargs):
    """
    Construct the PreResNet-152 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
def preresnet152b(**kwargs):
    """
    Construct the PreResNet-152 variant that applies the stride at the second convolution of each
    bottleneck block, following 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
def preresnet200(**kwargs):
    """
    Construct the PreResNet-200 model described in 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
def preresnet200b(**kwargs):
    """
    Construct the PreResNet-200 variant that applies the stride at the second convolution of each
    bottleneck block, following 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
def preresnet269b(**kwargs):
    """
    Construct the PreResNet-269 variant that applies the stride at the second convolution of each
    bottleneck block, following 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every PreResNet factory: parameter count, forward shape, and backward pass."""
    import torch

    pretrained = False

    # Expected number of trainable parameters for each model factory.
    expected_weight_counts = {
        preresnet10: 5417128,
        preresnet12: 5491112,
        preresnet14: 5786536,
        preresnetbc14b: 10057384,
        preresnet16: 6967208,
        preresnet18_wd4: 3935960,
        preresnet18_wd2: 5802440,
        preresnet18_w3d4: 8473784,
        preresnet18: 11687848,
        preresnet26: 17958568,
        preresnetbc26b: 15987624,
        preresnet34: 21796008,
        preresnetbc38b: 21917864,
        preresnet50: 25549480,
        preresnet50b: 25549480,
        preresnet101: 44541608,
        preresnet101b: 44541608,
        preresnet152: 60185256,
        preresnet152b: 60185256,
        preresnet200: 64666280,
        preresnet200b: 64666280,
        preresnet269b: 102065832,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        # One forward/backward round trip on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 26,501
| 32.044888
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/lednet.py
|
"""
LEDNet for image segmentation, implemented in PyTorch.
Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1905.02423.
"""
__all__ = ['LEDNet', 'lednet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, asym_conv3x3_block, ChannelShuffle,\
InterpolationBlock, Hourglass, BreakBlock
from .enet import ENetMixDownBlock
class LEDBranch(nn.Module):
    """
    LEDNet encoder branch: two asymmetric 3x3 convolution blocks (the second one dilated and
    without trailing activation), optionally followed by dropout.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for the second convolution block.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_eps):
        super(LEDBranch, self).__init__()
        # Dropout is only instantiated when a non-zero rate is requested.
        self.use_dropout = (dropout_rate != 0.0)

        self.conv1 = asym_conv3x3_block(
            channels=channels,
            bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps)
        self.conv2 = asym_conv3x3_block(
            channels=channels,
            padding=dilation,
            dilation=dilation,
            bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps,
            rw_activation=None)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        y = self.conv2(self.conv1(x))
        if self.use_dropout:
            y = self.dropout(y)
        return y
class LEDUnit(nn.Module):
    """
    LEDNet encoder unit (Split-Shuffle-non-bottleneck): the input is split channel-wise into two
    halves, each processed by its own `LEDBranch`, then re-concatenated, added to the identity,
    activated and channel-shuffled.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_eps):
        super(LEDUnit, self).__init__()
        half_channels = channels // 2

        self.left_branch = LEDBranch(
            channels=half_channels,
            dilation=dilation,
            dropout_rate=dropout_rate,
            bn_eps=bn_eps)
        self.right_branch = LEDBranch(
            channels=half_channels,
            dilation=dilation,
            dropout_rate=dropout_rate,
            bn_eps=bn_eps)
        self.activ = nn.ReLU(inplace=True)
        self.shuffle = ChannelShuffle(
            channels=channels,
            groups=2)

    def forward(self, x):
        identity = x
        # Split channels into two halves, run each through its branch.
        left, right = torch.chunk(x, chunks=2, dim=1)
        y = torch.cat((self.left_branch(left), self.right_branch(right)), dim=1)
        # Residual connection followed by activation and channel shuffle.
        y = self.activ(y + identity)
        return self.shuffle(y)
class PoolingBranch(nn.Module):
    """
    Pooling branch: adaptive average pooling to a small spatial size, a 1x1 convolution, and an
    upsampling back to the input resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    bn_eps : float
        Small float added to variance in Batch norm.
    in_size : tuple of 2 int or None
        Spatial size of input image (None means infer from the input tensor).
    down_size : int
        Spatial size of downscaled image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias,
                 bn_eps,
                 in_size,
                 down_size):
        super(PoolingBranch, self).__init__()
        self.in_size = in_size

        self.pool = nn.AdaptiveAvgPool2d(output_size=down_size)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            bn_eps=bn_eps)
        self.up = InterpolationBlock(
            scale_factor=None,
            out_size=in_size)

    def forward(self, x):
        # Fall back to the runtime spatial size when no fixed size was configured.
        out_size = self.in_size if self.in_size is not None else x.shape[2:]
        y = self.conv(self.pool(x))
        return self.up(y, out_size)
class APN(nn.Module):
    """
    Attention pyramid network block.

    Combines a global pooling branch, a 1x1 "body" projection, and a single-channel hourglass
    attention path: the body output is weighted by the hourglass attention map and the pooled
    global context is added on top.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 in_size):
        super(APN, self).__init__()
        self.in_size = in_size
        # The attention (hourglass) path operates on single-channel maps.
        att_out_channels = 1

        # Global-context branch: pool to 1x1, project, and upsample back.
        self.pool_branch = PoolingBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True,
            bn_eps=bn_eps,
            in_size=in_size,
            down_size=1)
        # Main 1x1 projection of the input features.
        self.body = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True,
            bn_eps=bn_eps)
        # Hourglass downscale path: 7x7 /2, 5x5 /2, then two 3x3 convolutions (first one /2).
        down_seq = nn.Sequential()
        down_seq.add_module("down1", conv7x7_block(
            in_channels=in_channels,
            out_channels=att_out_channels,
            stride=2,
            bias=True,
            bn_eps=bn_eps))
        down_seq.add_module("down2", conv5x5_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            stride=2,
            bias=True,
            bn_eps=bn_eps))
        down3_subseq = nn.Sequential()
        down3_subseq.add_module("conv1", conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            stride=2,
            bias=True,
            bn_eps=bn_eps))
        down3_subseq.add_module("conv2", conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            bias=True,
            bn_eps=bn_eps))
        down_seq.add_module("down3", down3_subseq)
        # Upscale path: three x2 interpolations.
        # NOTE(review): the same InterpolationBlock instance is registered three times;
        # it appears to be parameter-free, so sharing looks harmless — confirm.
        up_seq = nn.Sequential()
        up = InterpolationBlock(scale_factor=2)
        up_seq.add_module("up1", up)
        up_seq.add_module("up2", up)
        up_seq.add_module("up3", up)
        # Skip connections at each resolution level of the hourglass.
        skip_seq = nn.Sequential()
        skip_seq.add_module("skip1", BreakBlock())
        skip_seq.add_module("skip2", conv7x7_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            bias=True,
            bn_eps=bn_eps))
        skip_seq.add_module("skip3", conv5x5_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            bias=True,
            bn_eps=bn_eps))
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq)

    def forward(self, x):
        # Global context (pooled), attention weights (hourglass) and projected features.
        y = self.pool_branch(x)
        w = self.hg(x)
        x = self.body(x)
        # Weight the projected features by the attention map, then add global context.
        x = x * w
        x = x + y
        return x
class LEDNet(nn.Module):
    """
    LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1905.02423.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    dilations : list of int
        Dilations for units. The first entry of each stage marks the downscale unit
        (its dilation value is ignored).
    dropout_rates : list of list of int
        Dropout rates for each unit in encoder.
    correct_size_mismatch : bool
        Whether to correct downscaled sizes of images in encoder.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image. Must be divisible by 8.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 dilations,
                 dropout_rates,
                 correct_size_mismatch=False,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(LEDNet, self).__init__()
        assert (aux is not None)
        assert (fixed_size is not None)
        # The encoder downscales by 8, so the input size must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size

        # Encoder: each stage starts with a downscale block followed by LED units.
        self.encoder = nn.Sequential()
        for i, dilations_per_stage in enumerate(dilations):
            out_channels = channels[i]
            dropout_rate = dropout_rates[i]
            stage = nn.Sequential()
            for j, dilation in enumerate(dilations_per_stage):
                if j == 0:
                    # First unit of the stage downscales and changes channel count.
                    stage.add_module("unit{}".format(j + 1), ENetMixDownBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bias=True,
                        bn_eps=bn_eps,
                        correct_size_mismatch=correct_size_mismatch))
                    in_channels = out_channels
                else:
                    stage.add_module("unit{}".format(j + 1), LEDUnit(
                        channels=in_channels,
                        dilation=dilation,
                        dropout_rate=dropout_rate,
                        bn_eps=bn_eps))
            self.encoder.add_module("stage{}".format(i + 1), stage)
        # Attention pyramid head producing per-class maps at 1/8 resolution.
        self.apn = APN(
            in_channels=in_channels,
            out_channels=num_classes,
            bn_eps=bn_eps,
            in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None)
        # Final x8 upsampling back to the input resolution.
        self.up = InterpolationBlock(
            scale_factor=8,
            align_corners=True)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for all convolutions; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.encoder(x)
        x = self.apn(x)
        x = self.up(x)
        return x
def get_lednet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Build a LEDNet model with the standard configuration and optionally load pretrained weights.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = LEDNet(
        channels=[32, 64, 128],
        dilations=[[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]],
        dropout_rates=[0.03, 0.03, 0.3],
        bn_eps=1e-3,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def lednet_cityscapes(num_classes=19, **kwargs):
    """
    Construct the LEDNet model for Cityscapes segmentation ('LEDNet: A Lightweight Encoder-Decoder
    Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423).

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_lednet(num_classes=num_classes, model_name="lednet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the LEDNet factory: parameter count and forward output shape."""
    pretrained = False
    fixed_size = True
    correct_size_mismatch = False
    in_size = (1024, 2048)
    classes = 19

    for model in [lednet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size,
                    correct_size_mismatch=correct_size_mismatch)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != lednet_cityscapes or weight_count == 922821)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))


if __name__ == "__main__":
    _test()
| 13,638
| 29.241685
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/superpointnet.py
|
"""
SuperPointNet for HPatches (image matching), implemented in PyTorch.
Original paper: 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
https://arxiv.org/abs/1712.07629.
"""
__all__ = ['SuperPointNet', 'superpointnet']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from .common import conv1x1, conv3x3_block
class SPHead(nn.Module):
    """
    SuperPointNet head block: a 3x3 convolution (no batch norm) followed by a 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels):
        super(SPHead, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=True,
            use_bn=False)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class SPDetector(nn.Module):
    """
    SuperPointNet detector: predicts a per-cell keypoint distribution, unfolds it into a
    full-resolution heatmap, then extracts confident points with greedy non-maximum suppression.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    conf_thresh : float, default 0.015
        Confidence threshold.
    nms_dist : int, default 4
        NMS distance.
    border_size : int, default 4
        Image border size to remove points.
    reduction : int, default 8
        Feature reduction factor.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 conf_thresh=0.015,
                 nms_dist=4,
                 border_size=4,
                 reduction=8):
        super(SPDetector, self).__init__()
        self.conf_thresh = conf_thresh
        self.nms_dist = nms_dist
        self.border_size = border_size
        self.reduction = reduction
        # One class per pixel of a reduction x reduction cell, plus one "dustbin" channel.
        num_classes = reduction * reduction + 1

        self.detector = SPHead(
            in_channels=in_channels,
            mid_channels=mid_channels,
            out_channels=num_classes)

    def forward(self, x):
        # Returns (pts_list, confs_list): per-image tensors of keypoint (row, col)
        # coordinates in heatmap space and their confidences.
        batch = x.size(0)
        x_height, x_width = x.size()[-2:]
        img_height = x_height * self.reduction
        img_width = x_width * self.reduction
        semi = self.detector(x)

        # Softmax over the cell classes; drop the last ("dustbin") channel.
        dense = semi.softmax(dim=1)
        nodust = dense[:, :-1, :, :]

        # Unfold the (B, r*r, H, W) cell scores into a full-resolution (B, 1, H*r, W*r) heatmap.
        heatmap = nodust.permute(0, 2, 3, 1)
        heatmap = heatmap.reshape((-1, x_height, x_width, self.reduction, self.reduction))
        heatmap = heatmap.permute(0, 1, 3, 2, 4)
        heatmap = heatmap.reshape((-1, 1, x_height * self.reduction, x_width * self.reduction))

        heatmap_mask = (heatmap >= self.conf_thresh)
        pad = self.nms_dist
        bord = self.border_size + pad
        # Padded copy of the mask so NMS windows near the edge stay in bounds.
        heatmap_mask2 = F.pad(heatmap_mask, pad=(pad, pad, pad, pad))
        pts_list = []
        confs_list = []
        for i in range(batch):
            heatmap_i = heatmap[i, 0]
            heatmap_mask_i = heatmap_mask[i, 0]
            heatmap_mask2_i = heatmap_mask2[i, 0]
            src_pts = torch.nonzero(heatmap_mask_i)
            src_confs = torch.masked_select(heatmap_i, heatmap_mask_i)
            # Greedy NMS: visit candidates by descending confidence, suppress neighbors.
            src_inds = torch.argsort(src_confs, descending=True)
            dst_inds = torch.zeros_like(src_inds)
            dst_pts_count = 0
            for ind_j in src_inds:
                pt = src_pts[ind_j] + pad
                assert (pad <= pt[0] < heatmap_mask2_i.shape[0] - pad)
                assert (pad <= pt[1] < heatmap_mask2_i.shape[1] - pad)
                assert (0 <= pt[0] - pad < img_height)
                assert (0 <= pt[1] - pad < img_width)
                if heatmap_mask2_i[pt[0], pt[1]] == 1:
                    # Still unsuppressed: zero out the NMS window around it.
                    heatmap_mask2_i[(pt[0] - pad):(pt[0] + pad + 1), (pt[1] - pad):(pt[1] + pad + 1)] = 0
                    # Keep only points away from the image border.
                    if (bord < pt[0] - pad <= img_height - bord) and (bord < pt[1] - pad <= img_width - bord):
                        dst_inds[dst_pts_count] = ind_j
                        dst_pts_count += 1
            dst_inds = dst_inds[:dst_pts_count]
            dst_pts = torch.index_select(src_pts, dim=0, index=dst_inds)
            dst_confs = torch.index_select(src_confs, dim=0, index=dst_inds)
            pts_list.append(dst_pts)
            confs_list.append(dst_confs)
        return pts_list, confs_list
class SPDescriptor(nn.Module):
    """
    SuperPointNet descriptor generator: computes a coarse descriptor map and bilinearly samples
    it at the detected keypoint locations.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    descriptor_length : int, default 256
        Descriptor length.
    transpose_descriptors : bool, default True
        Whether transpose descriptors with respect to points.
    reduction : int, default 8
        Feature reduction factor.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 descriptor_length=256,
                 transpose_descriptors=True,
                 reduction=8):
        super(SPDescriptor, self).__init__()
        self.desc_length = descriptor_length
        self.transpose_descriptors = transpose_descriptors
        self.reduction = reduction

        self.head = SPHead(
            in_channels=in_channels,
            mid_channels=mid_channels,
            out_channels=descriptor_length)

    def forward(self, x, pts_list):
        # Returns a list (one entry per image) of L2-normalized descriptors, one row per point.
        x_height, x_width = x.size()[-2:]
        coarse_desc_map = self.head(x)
        coarse_desc_map = F.normalize(coarse_desc_map)
        descriptors_list = []
        for i, pts in enumerate(pts_list):
            pts = pts.float()
            # Normalize point coordinates into [-1, 1] as expected by grid_sample.
            pts[:, 0] = pts[:, 0] / (0.5 * x_height * self.reduction) - 1.0
            pts[:, 1] = pts[:, 1] / (0.5 * x_width * self.reduction) - 1.0
            if self.transpose_descriptors:
                # Swap (row, col) -> (col, row) since grid_sample expects (x, y) order.
                pts = torch.index_select(pts, dim=1, index=torch.tensor([1, 0], device=pts.device))
            pts = pts.unsqueeze(0).unsqueeze(0)
            # NOTE(review): align_corners is not set; its default changed in torch 1.3 — confirm
            # the intended sampling convention matches the pretrained weights.
            descriptors = F.grid_sample(coarse_desc_map[i:(i + 1)], pts)
            descriptors = descriptors.squeeze(0).squeeze(1)
            descriptors = descriptors.transpose(0, 1)
            descriptors = F.normalize(descriptors)
            descriptors_list.append(descriptors)
        return descriptors_list
class SuperPointNet(nn.Module):
    """
    SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
    https://arxiv.org/abs/1712.07629.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    final_block_channels : int
        Number of output channels for the final units.
    transpose_descriptors : bool, default True
        Whether transpose descriptors with respect to points.
    in_channels : int, default 1
        Number of input channels (grayscale input).
    """
    def __init__(self,
                 channels,
                 final_block_channels,
                 transpose_descriptors=True,
                 in_channels=1):
        super(SuperPointNet, self).__init__()
        # VGG-like backbone: stages of 3x3 conv blocks, with 2x max-pooling between stages.
        self.features = nn.Sequential()
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                if (j == 0) and (i != 0):
                    stage.add_module("reduce{}".format(i + 1), nn.MaxPool2d(
                        kernel_size=2,
                        stride=2))
                stage.add_module("unit{}".format(j + 1), conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bias=True,
                    use_bn=False))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Two heads share the backbone features: keypoint detection and description.
        self.detector = SPDetector(
            in_channels=in_channels,
            mid_channels=final_block_channels)
        self.descriptor = SPDescriptor(
            in_channels=in_channels,
            mid_channels=final_block_channels,
            transpose_descriptors=transpose_descriptors)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for all convolutions; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        # Returns per-image lists of keypoints, confidences and descriptors.
        assert (x.size(1) == 1)
        x = self.features(x)
        pts_list, confs_list = self.detector(x)
        descriptors_list = self.descriptor(x, pts_list)
        return pts_list, confs_list, descriptors_list
def get_superpointnet(model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".torch", "models"),
                      **kwargs):
    """
    Build a SuperPointNet model with the standard configuration and optionally load pretrained
    weights.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Four stages of two conv blocks each.
    stage_widths = [64, 64, 128, 128]
    stage_depths = [2, 2, 2, 2]
    channels = [[width] * depth for width, depth in zip(stage_widths, stage_depths)]

    net = SuperPointNet(
        channels=channels,
        final_block_channels=256,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def superpointnet(**kwargs):
    """
    Construct the SuperPointNet model ('SuperPoint: Self-Supervised Interest Point Detection and
    Description,' https://arxiv.org/abs/1712.07629).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_superpointnet(model_name="superpointnet", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the SuperPointNet factory: parameter count and forward output structure."""
    import torch

    pretrained = False

    for model in [superpointnet]:
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != superpointnet or weight_count == 1300865)

        x = torch.randn(1, 1, 1000, 2000)
        y = net(x)
        assert (len(y) == 3)


if __name__ == "__main__":
    _test()
| 11,418
| 31.719198
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/ibndensenet.py
|
"""
IBN-DenseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNDenseNet', 'ibn_densenet121', 'ibn_densenet161', 'ibn_densenet169', 'ibn_densenet201']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv3x3_block, IBN
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class IBNPreConvBlock(nn.Module):
    """
    IBN-Net specific pre-activated convolution block: BN or IBN normalization,
    ReLU, then the convolution itself.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_ibn : bool, default False
        Whether to use Instance-Batch Normalization instead of plain BatchNorm.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor (used by PreResNet).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 use_ibn=False,
                 return_preact=False):
        super(IBNPreConvBlock, self).__init__()
        self.use_ibn = use_ibn
        self.return_preact = return_preact
        # Choose the normalization flavor once at construction time.
        if use_ibn:
            self.ibn = IBN(
                channels=in_channels,
                first_fraction=0.6,
                inst_first=False)
        else:
            self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False)

    def forward(self, x):
        norm = self.ibn if self.use_ibn else self.bn
        x = self.activ(norm(x))
        if self.return_preact:
            # The activated (pre-conv) tensor doubles as the "pre-activation".
            return self.conv(x), x
        return self.conv(x)
def ibn_pre_conv1x1_block(in_channels,
                          out_channels,
                          stride=1,
                          use_ibn=False,
                          return_preact=False):
    """
    1x1 specialization of the IBN-Net pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_ibn : bool, default False
        Whether to use Instance-Batch Normalization.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    """
    block = IBNPreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        use_ibn=use_ibn,
        return_preact=return_preact)
    return block
class IBNDenseUnit(nn.Module):
    """
    IBN-DenseNet unit: a pre-activated 1x1 bottleneck (optionally with IBN)
    followed by a pre-activated 3x3 convolution, whose output is concatenated
    with the unit input along the channel axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 conv1_ibn):
        super(IBNDenseUnit, self).__init__()
        self.use_dropout = dropout_rate != 0.0
        bn_size = 4
        # The unit contributes (out - in) new feature maps; the bottleneck
        # widens by `bn_size` before the 3x3 conv squeezes back down.
        growth_channels = out_channels - in_channels
        self.conv1 = ibn_pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=growth_channels * bn_size,
            use_ibn=conv1_ibn)
        self.conv2 = pre_conv3x3_block(
            in_channels=growth_channels * bn_size,
            out_channels=growth_channels)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        if self.use_dropout:
            out = self.dropout(out)
        # Dense connectivity: stack new features onto the incoming ones.
        return torch.cat((x, out), dim=1)
class IBNDenseNet(nn.Module):
    """
    IBN-DenseNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(IBNDenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            # Every stage except the first starts with a transition block that
            # halves the channel count (standard DenseNet layout).
            if i != 0:
                stage.add_module("trans{}".format(i + 1), TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                # IBN is applied only in the first three stages, and only in
                # every third unit of a stage.
                conv1_ibn = (i < 3) and (j % 3 == 0)
                stage.add_module("unit{}".format(j + 1), IBNDenseUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    conv1_ibn=conv1_ibn))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for all convolution weights.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, channels)
        x = self.output(x)
        return x
def get_ibndensenet(num_layers,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create IBN-DenseNet model with specific parameters.

    Parameters:
    ----------
    num_layers : int
        Number of layers (one of 121, 161, 169, 201).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `num_layers` is unsupported, or `pretrained` is set without a
        valid `model_name`.
    """
    if num_layers == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif num_layers == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif num_layers == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif num_layers == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported IBN-DenseNet version with number of layers {}".format(num_layers))

    # Cumulative per-unit output channels for each stage. Each stage starts
    # from half of the previous stage's final width (the transition block
    # halves channels) and every dense unit adds `growth_rate` channels.
    # (Replaces the original nested functools.reduce construction with
    # equivalent, readable loops.)
    channels = []
    prev_stage_channels = init_block_channels * 2
    for num_units in layers:
        stage_channels = []
        unit_channels = prev_stage_channels // 2
        for _ in range(num_units):
            unit_channels += growth_rate
            stage_channels.append(unit_channels)
        channels.append(stage_channels)
        prev_stage_channels = stage_channels[-1]

    net = IBNDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def ibn_densenet121(**kwargs):
    """
    IBN-DenseNet-121 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_ibndensenet(num_layers=121, model_name="ibn_densenet121", **kwargs)
    return net
def ibn_densenet161(**kwargs):
    """
    IBN-DenseNet-161 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_ibndensenet(num_layers=161, model_name="ibn_densenet161", **kwargs)
    return net
def ibn_densenet169(**kwargs):
    """
    IBN-DenseNet-169 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_ibndensenet(num_layers=169, model_name="ibn_densenet169", **kwargs)
    return net
def ibn_densenet201(**kwargs):
    """
    IBN-DenseNet-201 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_ibndensenet(num_layers=201, model_name="ibn_densenet201", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test each IBN-DenseNet variant: parameter count, forward shape, backward."""
    import torch

    expected_counts = {
        ibn_densenet121: 7978856,
        ibn_densenet161: 28681000,
        ibn_densenet169: 14149480,
        ibn_densenet201: 20013928,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=False)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
_test()
| 12,647
| 30.384615
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/hardnet.py
|
"""
HarDNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
"""
__all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block
class InvDwsConvBlock(nn.Module):
    """
    Inverse depthwise separable convolution block: the pointwise (1x1)
    convolution runs first and the depthwise convolution second — the reverse
    of the usual order — each with optional BatchNorm and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the depthwise convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 pw_activation=(lambda: nn.ReLU(inplace=True)),
                 dw_activation=(lambda: nn.ReLU(inplace=True))):
        super(InvDwsConvBlock, self).__init__()
        # Pointwise conv performs the channel change up front ...
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=pw_activation)
        # ... so the depthwise conv operates on the output channel count.
        self.dw_conv = dwconv_block(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=dw_activation)

    def forward(self, x):
        return self.dw_conv(self.pw_conv(x))
def invdwsconv3x3_block(in_channels,
                        out_channels,
                        stride=1,
                        padding=1,
                        dilation=1,
                        bias=False,
                        bn_eps=1e-5,
                        pw_activation=(lambda: nn.ReLU(inplace=True)),
                        dw_activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 inverse depthwise separable version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the depthwise convolution block.
    """
    block = InvDwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        pw_activation=pw_activation,
        dw_activation=dw_activation)
    return block
class HarDUnit(nn.Module):
    """
    HarDNet unit (a "harmonic dense block" plus transition conv and optional
    downsampling).

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels for each block.
    out_channels_list : list of int
        Number of output channels for each block.
    links_list : list of list of int
        List of indices for each layer.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_dropout : bool
        Whether to use dropout module.
    downsampling : bool
        Whether to downsample input.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 links_list,
                 use_deptwise,
                 use_dropout,
                 downsampling,
                 activation):
        super(HarDUnit, self).__init__()
        self.links_list = links_list
        self.use_dropout = use_dropout
        self.downsampling = downsampling
        # One conv block per harmonic layer; inverse depthwise-separable
        # variant for the "DS" models, plain 3x3 conv otherwise.
        self.blocks = nn.Sequential()
        for i in range(len(links_list)):
            in_channels = in_channels_list[i]
            out_channels = out_channels_list[i]
            if use_deptwise:
                unit = invdwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    pw_activation=activation,
                    dw_activation=None)
            else:
                unit = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels)
            self.blocks.add_module("block{}".format(i + 1), unit)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=0.1)
        # 1x1 transition conv applied to the concatenated selected outputs.
        self.conv = conv1x1_block(
            in_channels=in_channels_list[-1],
            out_channels=out_channels_list[-1],
            activation=activation)
        if self.downsampling:
            if use_deptwise:
                self.downsample = dwconv3x3_block(
                    in_channels=out_channels_list[-1],
                    out_channels=out_channels_list[-1],
                    stride=2,
                    activation=None)
            else:
                self.downsample = nn.MaxPool2d(
                    kernel_size=2,
                    stride=2)

    def forward(self, x):
        # Harmonic dense connectivity: each layer consumes the outputs of the
        # earlier layers listed in its links entry (index 0 is the unit input).
        layer_outs = [x]
        for links_i, layer_i in zip(self.links_list, self.blocks._modules.values()):
            layer_in = []
            for idx_ij in links_i:
                layer_in.append(layer_outs[idx_ij])
            if len(layer_in) > 1:
                x = torch.cat(layer_in, dim=1)
            else:
                x = layer_in[0]
            out = layer_i(x)
            layer_outs.append(out)
        # Keep the last layer's output plus every odd-indexed output for the
        # final concatenation fed into the transition conv.
        outs = []
        for i, layer_out_i in enumerate(layer_outs):
            if (i == len(layer_outs) - 1) or (i % 2 == 1):
                outs.append(layer_out_i)
        x = torch.cat(outs, dim=1)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.conv(x)
        if self.downsampling:
            x = self.downsample(x)
        return x
class HarDInitBlock(nn.Module):
    """
    HarDNet specific initial block: a stride-2 3x3 conv, a second conv, then a
    stride-2 downsampling step (depthwise conv or max-pooling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_deptwise,
                 activation):
        super(HarDInitBlock, self).__init__()
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2,
            activation=activation)
        # DS variants use a cheap 1x1 conv here, full models a 3x3 conv.
        conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block
        self.conv2 = conv2_block_class(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=activation)
        if use_deptwise:
            self.downsample = dwconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                stride=2,
                activation=None)
        else:
            self.downsample = nn.MaxPool2d(
                kernel_size=3,
                stride=2,
                padding=1)

    def forward(self, x):
        return self.downsample(self.conv2(self.conv1(x)))
class HarDNet(nn.Module):
    """
    HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    init_block_channels : int
        Number of output channels for the initial unit.
    unit_in_channels : list of list of list of int
        Number of input channels for each layer in each stage.
    unit_out_channels : list of list of list of int
        Number of output channels for each layer in each stage.
    unit_links : list of list of list of int
        List of indices for each layer in each stage.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_last_dropout : bool
        Whether to use dropouts in the last unit.
    output_dropout_rate : float
        Parameter of Dropout layer before classifier. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 init_block_channels,
                 unit_in_channels,
                 unit_out_channels,
                 unit_links,
                 use_deptwise,
                 use_last_dropout,
                 output_dropout_rate,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(HarDNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        activation = "relu6"

        self.features = nn.Sequential()
        self.features.add_module("init_block", HarDInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            use_deptwise=use_deptwise,
            activation=activation))
        for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)):
            stage = nn.Sequential()
            for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i,
                                                                                out_channels_list_i)):
                # Dropout only in the very last unit of the network;
                # downsampling at the end of every stage except the last one.
                use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
                               use_last_dropout)
                downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
                stage.add_module("unit{}".format(j + 1), HarDUnit(
                    in_channels_list=in_channels_list_ij,
                    out_channels_list=out_channels_list_ij,
                    links_list=unit_links[i][j],
                    use_deptwise=use_deptwise,
                    use_dropout=use_dropout,
                    downsampling=downsampling,
                    activation=activation))
            self.features.add_module("stage{}".format(i + 1), stage)
        in_channels = unit_out_channels[-1][-1][-1]
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Sequential()
        self.output.add_module("dropout", nn.Dropout(p=output_dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        # Bug fix: `named_modules()` yields (name, module) tuples. The original
        # code iterated the tuples directly, so every isinstance() check failed
        # and the custom initialization below was silently never applied.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, channels)
        x = self.output(x)
        return x
def get_hardnet(blocks,
                use_deptwise=True,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create HarDNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 39, 68, 85).
    use_deptwise : bool, default True
        Whether to use depthwise separable version of the model.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if blocks == 39:
        init_block_channels = 48
        growth_factor = 1.6
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [4, 16, 8, 4]
        channels_per_layers = [96, 320, 640, 1024]
        growth_rates = [16, 20, 64, 160]
        downsamples = [1, 1, 1, 0]
        use_dropout = False
    elif blocks == 68:
        init_block_channels = 64
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [8, 16, 16, 16, 4]
        channels_per_layers = [128, 256, 320, 640, 1024]
        growth_rates = [14, 16, 20, 40, 160]
        downsamples = [1, 0, 1, 1, 0]
        use_dropout = False
    elif blocks == 85:
        init_block_channels = 96
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.2
        layers = [8, 16, 16, 16, 16, 4]
        channels_per_layers = [192, 256, 320, 480, 720, 1280]
        growth_rates = [24, 24, 28, 36, 48, 256]
        downsamples = [1, 0, 1, 0, 1, 0]
        use_dropout = True
    else:
        raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks))
    # The final unit never downsamples (the classifier pool follows it).
    assert (downsamples[-1] == 0)

    def calc_stage_params():
        # Derive per-unit channel/link bookkeeping for HarDUnit, then group
        # consecutive units into stages separated by downsampling steps.

        def calc_unit_params():

            def calc_blocks_params(layer_idx,
                                   base_channels,
                                   growth_rate):
                # Harmonic links: layer k connects back to layer k - 2**t for
                # every power of two 2**t that divides k; the output channel
                # count grows geometrically with each extra link.
                if layer_idx == 0:
                    return base_channels, 0, []
                out_channels_ij = growth_rate
                links_ij = []
                for k in range(10):
                    dv = 2 ** k
                    if layer_idx % dv == 0:
                        t = layer_idx - dv
                        links_ij.append(t)
                        if k > 0:
                            out_channels_ij *= growth_factor
                out_channels_ij = int(int(out_channels_ij + 1) / 2) * 2  # round to an even integer
                in_channels_ij = 0
                for t in links_ij:
                    out_channels_ik, _, _ = calc_blocks_params(
                        layer_idx=t,
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    in_channels_ij += out_channels_ik
                return out_channels_ij, in_channels_ij, links_ij

            unit_out_channels = []
            unit_in_channels = []
            unit_links = []
            for num_layers, growth_rate, base_channels, channels_per_layers_i in zip(
                    layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers):
                stage_out_channels_i = 0
                unit_out_channels_i = []
                unit_in_channels_i = []
                unit_links_i = []
                for j in range(num_layers):
                    out_channels_ij, in_channels_ij, links_ij = calc_blocks_params(
                        layer_idx=(j + 1),
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    unit_out_channels_i.append(out_channels_ij)
                    unit_in_channels_i.append(in_channels_ij)
                    unit_links_i.append(links_ij)
                    # Only even-indexed layers and the final layer feed the
                    # transition conv (mirrors HarDUnit.forward selection).
                    if (j % 2 == 0) or (j == num_layers - 1):
                        stage_out_channels_i += out_channels_ij
                # Append the transition conv's in/out widths for this stage.
                unit_in_channels_i.append(stage_out_channels_i)
                unit_out_channels_i.append(channels_per_layers_i)
                unit_out_channels.append(unit_out_channels_i)
                unit_in_channels.append(unit_in_channels_i)
                unit_links.append(unit_links_i)
            return unit_out_channels, unit_in_channels, unit_links

        unit_out_channels, unit_in_channels, unit_links = calc_unit_params()

        stage_out_channels = []
        stage_in_channels = []
        stage_links = []
        stage_out_channels_k = None
        for i in range(len(layers)):
            if stage_out_channels_k is None:
                stage_out_channels_k = []
                stage_in_channels_k = []
                stage_links_k = []
            stage_out_channels_k.append(unit_out_channels[i])
            stage_in_channels_k.append(unit_in_channels[i])
            stage_links_k.append(unit_links[i])
            # A downsampling step (or the end of the network) closes the
            # currently accumulating stage.
            if (downsamples[i] == 1) or (i == len(layers) - 1):
                stage_out_channels.append(stage_out_channels_k)
                stage_in_channels.append(stage_in_channels_k)
                stage_links.append(stage_links_k)
                stage_out_channels_k = None
        return stage_out_channels, stage_in_channels, stage_links

    stage_out_channels, stage_in_channels, stage_links = calc_stage_params()

    net = HarDNet(
        init_block_channels=init_block_channels,
        unit_in_channels=stage_in_channels,
        unit_out_channels=stage_out_channels,
        unit_links=stage_links,
        use_deptwise=use_deptwise,
        use_last_dropout=use_dropout,
        output_dropout_rate=dropout_rate,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def hardnet39ds(**kwargs):
    """
    HarDNet-39DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_hardnet(blocks=39, use_deptwise=True, model_name="hardnet39ds", **kwargs)
    return net
def hardnet68ds(**kwargs):
    """
    HarDNet-68DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_hardnet(blocks=68, use_deptwise=True, model_name="hardnet68ds", **kwargs)
    return net
def hardnet68(**kwargs):
    """
    HarDNet-68 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_hardnet(blocks=68, use_deptwise=False, model_name="hardnet68", **kwargs)
    return net
def hardnet85(**kwargs):
    """
    HarDNet-85 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory where model parameters are stored.
    """
    net = get_hardnet(blocks=85, use_deptwise=False, model_name="hardnet85", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test each HarDNet variant: parameter count, forward shape, backward."""
    import torch

    expected_counts = {
        hardnet39ds: 3488228,
        hardnet68ds: 4180602,
        hardnet68: 17565348,
        hardnet85: 36670212,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=False)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
_test()
| 21,984
| 34.176
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/sinet.py
|
"""
SINet for image segmentation, implemented in PyTorch.
Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
"""
__all__ = ['SINet', 'sinet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1, get_activation_layer, conv1x1_block, conv3x3_block, round_channels, dwconv_block,\
Concurrent, InterpolationBlock, ChannelShuffle
class SEBlock(nn.Module):
    """
    SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    mid_activation : function, or str, or nn.Module, default nn.ReLU
        Activation function after the first fully-connected layer.
    out_activation : function, or str, or nn.Module, default nn.Sigmoid
        Activation function after the last fully-connected layer.
    """
    # NOTE: the original docstring documented a nonexistent `activation`
    # parameter and omitted the actual `mid_activation` one; fixed above.
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        # With reduction == 1 the block degenerates to a single linear layer,
        # so the middle activation and the second FC layer are skipped.
        self.use_conv2 = (reduction > 1)
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)

        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.fc1 = nn.Linear(
            in_features=channels,
            out_features=mid_channels)
        if self.use_conv2:
            self.activ = get_activation_layer(mid_activation)
            self.fc2 = nn.Linear(
                in_features=mid_channels,
                out_features=channels)
        self.sigmoid = get_activation_layer(out_activation)

    def forward(self, x):
        # Squeeze: global average pool down to a (batch, channels) vector.
        w = self.pool(x)
        w = w.squeeze(dim=-1).squeeze(dim=-1)
        # Excitation: bottleneck MLP producing per-channel gates.
        w = self.fc1(w)
        if self.use_conv2:
            w = self.activ(w)
            w = self.fc2(w)
        w = self.sigmoid(w)
        # Re-scale the input feature map channel-wise.
        w = w.unsqueeze(dim=-1).unsqueeze(dim=-1)
        x = x * w
        return x
class DwsConvBlock(nn.Module):
    """
    SINet version of depthwise separable convolution block: depthwise conv,
    optional Squeeze-and-Excitation, then pointwise conv, with BatchNorm and
    activation at each convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the pointwise convolution block.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True)),
                 se_reduction=0):
        super(DwsConvBlock, self).__init__()
        self.use_se = se_reduction > 0
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=dw_use_bn,
            bn_eps=bn_eps,
            activation=dw_activation)
        if self.use_se:
            # SE sits between depthwise and pointwise convs; PReLU is used for
            # both internal activations as in the reference implementation.
            self.se = SEBlock(
                channels=in_channels,
                reduction=se_reduction,
                round_mid=False,
                mid_activation=(lambda: nn.PReLU(in_channels // se_reduction)),
                out_activation=(lambda: nn.PReLU(in_channels)))
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=pw_use_bn,
            bn_eps=bn_eps,
            activation=pw_activation)

    def forward(self, x):
        x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        return self.pw_conv(x)
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     dw_use_bn=True,
                     pw_use_bn=True,
                     bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True)),
                     se_reduction=0):
    """
    3x3 depthwise separable version of the standard convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function after the pointwise convolution block.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    """
    block = DwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        dw_use_bn=dw_use_bn,
        pw_use_bn=pw_use_bn,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation,
        se_reduction=se_reduction)
    return block
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    Build a 3x3 depthwise version of the standard convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    # Thin wrapper over dwconv_block with the kernel size pinned to 3x3.
    return dwconv_block(
        in_channels=in_channels, out_channels=out_channels, kernel_size=3,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        bn_eps=bn_eps, activation=activation)
class FDWConvBlock(nn.Module):
    """
    Factorized depthwise separable convolution block: a (k, 1) vertical and a
    (1, k) horizontal depthwise convolution applied in parallel and summed,
    followed by an optional shared activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer (must be True for this block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function applied after summing the two branches.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(FDWConvBlock, self).__init__()
        assert use_bn
        self.activate = (activation is not None)

        def make_branch(branch_kernel, branch_padding):
            # Both branches share every setting except kernel/padding shape;
            # each branch runs without its own activation (applied jointly).
            return dwconv_block(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=branch_kernel,
                stride=stride,
                padding=branch_padding,
                dilation=dilation,
                bias=bias,
                use_bn=use_bn,
                bn_eps=bn_eps,
                activation=None)

        self.v_conv = make_branch((kernel_size, 1), (padding, 0))
        self.h_conv = make_branch((1, kernel_size), (0, padding))
        if self.activate:
            self.act = get_activation_layer(activation)

    def forward(self, x):
        out = self.v_conv(x) + self.h_conv(x)
        return self.act(out) if self.activate else out
def fdwconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation=(lambda: nn.ReLU(inplace=True))):
    """
    Build a 3x3 factorized depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    # Thin wrapper over FDWConvBlock with the kernel size pinned to 3.
    return FDWConvBlock(
        in_channels=in_channels, out_channels=out_channels, kernel_size=3,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        use_bn=use_bn, bn_eps=bn_eps, activation=activation)
def fdwconv5x5_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=2,
                     dilation=1,
                     bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 factorized depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    # Wrapper over FDWConvBlock with kernel size 5 (hence default padding 2,
    # which the previous docstring mis-stated as 1).
    return FDWConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
class SBBlock(nn.Module):
    """
    SB-block: an optional spatial squeeze (average pooling before, upsampling
    after) around a depthwise convolution, followed by a pointwise projection
    and BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size for a factorized depthwise separable convolution block.
    scale_factor : int
        Scale factor.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 scale_factor,
                 bn_eps):
        super(SBBlock, self).__init__()
        self.use_scale = (scale_factor > 1)

        if self.use_scale:
            # Process at a reduced resolution, then restore the spatial size.
            self.down_scale = nn.AvgPool2d(
                kernel_size=scale_factor,
                stride=scale_factor)
            self.up_scale = InterpolationBlock(scale_factor=scale_factor)

        # NOTE(review): scale_factor appears to always be >= 1 here, which
        # makes the dwconv3x3_block fallback unreachable — confirm callers.
        use_fdw = (scale_factor > 0)
        if use_fdw:
            fdw_block = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block
            self.conv1 = fdw_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_eps=bn_eps,
                activation=(lambda: nn.PReLU(in_channels)))
        else:
            self.conv1 = dwconv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_eps=bn_eps,
                activation=(lambda: nn.PReLU(in_channels)))

        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels)
        self.bn = nn.BatchNorm2d(
            num_features=out_channels,
            eps=bn_eps)

    def forward(self, x):
        if self.use_scale:
            x = self.down_scale(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_scale:
            x = self.up_scale(x)
        return self.bn(x)
class PreActivation(nn.Module):
    """
    PreResNet-style pure pre-activation block (BatchNorm followed by PReLU),
    with no convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(PreActivation, self).__init__()
        self.bn = nn.BatchNorm2d(
            num_features=in_channels,
            eps=bn_eps)
        self.activ = nn.PReLU(num_parameters=in_channels)

    def forward(self, x):
        return self.activ(self.bn(x))
class ESPBlock(nn.Module):
    """
    ESP block, based on the principle: Reduce ---> Split ---> Transform --> Merge.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_sizes : list of int
        Convolution window size for branches.
    scale_factors : list of int
        Scale factor for branches.
    use_residual : bool
        Whether to use residual connection.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 scale_factors,
                 use_residual,
                 bn_eps):
        super(ESPBlock, self).__init__()
        self.use_residual = use_residual
        groups = len(kernel_sizes)
        # Channels split evenly across branches; the first branch absorbs the
        # remainder so branch outputs sum to out_channels exactly.
        mid_channels = out_channels // groups
        res_channels = out_channels - groups * mid_channels

        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=groups)
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups)

        self.branches = Concurrent()
        for i, (branch_ks, branch_sf) in enumerate(zip(kernel_sizes, scale_factors)):
            branch_out_channels = mid_channels + (res_channels if i == 0 else 0)
            self.branches.add_module("branch{}".format(i + 1), SBBlock(
                in_channels=mid_channels,
                out_channels=branch_out_channels,
                kernel_size=branch_ks,
                scale_factor=branch_sf,
                bn_eps=bn_eps))

        self.preactiv = PreActivation(
            in_channels=out_channels,
            bn_eps=bn_eps)

    def forward(self, x):
        identity = x if self.use_residual else None
        x = self.conv(x)
        x = self.c_shuffle(x)
        x = self.branches(x)
        if identity is not None:
            x = identity + x
        return self.preactiv(x)
class SBStage(nn.Module):
    """
    SB stage: a strided depthwise separable downscale convolution followed by a
    chain of ESP blocks; the downscaled input and the chain output are
    concatenated and pre-activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    down_channels : int
        Number of output channels for a downscale block.
    channels_list : list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of int
        Convolution window size for branches.
    scale_factors_list : list of int
        Scale factor for branches.
    use_residual_list : list of int
        List of flags for using residual in each ESP-block.
    se_reduction : int
        Squeeze reduction value (0 means no-se).
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 down_channels,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 se_reduction,
                 bn_eps):
        super(SBStage, self).__init__()
        self.down_conv = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=down_channels,
            stride=2,
            dw_use_bn=False,
            bn_eps=bn_eps,
            dw_activation=None,
            pw_activation=(lambda: nn.PReLU(down_channels)),
            se_reduction=se_reduction)

        self.main_branch = nn.Sequential()
        block_in_channels = down_channels
        for i, block_out_channels in enumerate(channels_list):
            self.main_branch.add_module("block{}".format(i + 1), ESPBlock(
                in_channels=block_in_channels,
                out_channels=block_out_channels,
                kernel_sizes=kernel_sizes_list[i],
                scale_factors=scale_factors_list[i],
                use_residual=(use_residual_list[i] == 1),
                bn_eps=bn_eps))
            block_in_channels = block_out_channels

        self.preactiv = PreActivation(
            in_channels=(down_channels + block_in_channels),
            bn_eps=bn_eps)

    def forward(self, x):
        down = self.down_conv(x)
        main = self.main_branch(down)
        merged = self.preactiv(torch.cat((down, main), dim=1))
        return merged, main
class SBEncoderInitBlock(nn.Module):
    """
    SB encoder specific initial block: two stride-2 convolutions, for a 4x
    overall spatial reduction.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 bn_eps):
        super(SBEncoderInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid_channels)))
        self.conv2 = dwsconv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            stride=2,
            dw_use_bn=False,
            bn_eps=bn_eps,
            dw_activation=None,
            pw_activation=(lambda: nn.PReLU(out_channels)),
            se_reduction=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class SBEncoder(nn.Module):
    """
    SB encoder for SINet: init block, two SB stages, and a final 1x1 projection.
    Returns intermediate feature maps for the decoder's skip connections.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of input channels.
    init_block_channels : list int
        Number of output channels for convolutions in the initial block.
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 init_block_channels,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 bn_eps):
        super(SBEncoder, self).__init__()
        self.init_block = SBEncoderInitBlock(
            in_channels=in_channels,
            mid_channels=init_block_channels[0],
            out_channels=init_block_channels[1],
            bn_eps=bn_eps)

        stage_in_channels = init_block_channels[1]
        self.stage1 = SBStage(
            in_channels=stage_in_channels,
            down_channels=down_channels_list[0],
            channels_list=channels_list[0],
            kernel_sizes_list=kernel_sizes_list[0],
            scale_factors_list=scale_factors_list[0],
            use_residual_list=use_residual_list[0],
            se_reduction=1,
            bn_eps=bn_eps)

        # Each stage outputs the concatenation of its downscale branch and its
        # last ESP block, hence the summed channel count.
        stage_in_channels = down_channels_list[0] + channels_list[0][-1]
        self.stage2 = SBStage(
            in_channels=stage_in_channels,
            down_channels=down_channels_list[1],
            channels_list=channels_list[1],
            kernel_sizes_list=kernel_sizes_list[1],
            scale_factors_list=scale_factors_list[1],
            use_residual_list=use_residual_list[1],
            se_reduction=2,
            bn_eps=bn_eps)

        stage_in_channels = down_channels_list[1] + channels_list[1][-1]
        self.output = conv1x1(
            in_channels=stage_in_channels,
            out_channels=out_channels)

    def forward(self, x):
        y1 = self.init_block(x)
        x, y2 = self.stage1(y1)
        x, _ = self.stage2(x)
        return self.output(x), y2, y1
class SBDecodeBlock(nn.Module):
    """
    SB decoder block for SINet: upsample, normalize, and merge with the skip
    features using confidence-weighted gating ("information blocking").

    Parameters:
    ----------
    channels : int
        Number of output classes.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 bn_eps):
        super(SBDecodeBlock, self).__init__()
        self.up = InterpolationBlock(
            scale_factor=2,
            align_corners=False)
        self.bn = nn.BatchNorm2d(
            num_features=channels,
            eps=bn_eps)
        self.conf = nn.Softmax2d()

    def forward(self, x, y):
        x = self.bn(self.up(x))
        # Where the decoder is confident (high max class probability), the
        # skip features y are suppressed; where it is uncertain, they pass.
        w_conf = self.conf(x)
        w_max = torch.max(w_conf, dim=1)[0].unsqueeze(1).expand_as(x)
        return y * (1 - w_max) + x
class SBDecoder(nn.Module):
    """
    SB decoder for SINet: two decode blocks with skip connections, a transposed
    convolution, and a final 2x upsampling.

    Parameters:
    ----------
    dim2 : int
        Size of dimension #2.
    num_classes : int
        Number of segmentation classes.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 dim2,
                 num_classes,
                 bn_eps):
        super(SBDecoder, self).__init__()
        self.decode1 = SBDecodeBlock(
            channels=num_classes,
            bn_eps=bn_eps)
        self.decode2 = SBDecodeBlock(
            channels=num_classes,
            bn_eps=bn_eps)
        # Projects the stage-1 skip features down to class channels.
        self.conv3c = conv1x1_block(
            in_channels=dim2,
            out_channels=num_classes,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(num_classes)))
        self.output = nn.ConvTranspose2d(
            in_channels=num_classes,
            out_channels=num_classes,
            kernel_size=2,
            stride=2,
            padding=0,
            output_padding=0,
            bias=False)
        self.up = InterpolationBlock(scale_factor=2)

    def forward(self, y3, y2, y1):
        skip2 = self.conv3c(y2)
        out = self.decode1(y3, skip2)
        out = self.decode2(out, y1)
        out = self.output(out)
        return self.up(out)
class SINet(nn.Module):
    """
    SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
    Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    dim2 : int
        Size of dimension #2.
    bn_eps : float
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 dim2,
                 bn_eps,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=21):
        super(SINet, self).__init__()
        assert (fixed_size is not None)
        assert (in_channels > 0)
        assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux

        # The encoder's init block reduces to 16 channels, then to class count.
        self.encoder = SBEncoder(
            in_channels=in_channels,
            out_channels=num_classes,
            init_block_channels=[16, num_classes],
            down_channels_list=down_channels_list,
            channels_list=channels_list,
            kernel_sizes_list=kernel_sizes_list,
            scale_factors_list=scale_factors_list,
            use_residual_list=use_residual_list,
            bn_eps=bn_eps)

        self.decoder = SBDecoder(
            dim2=dim2,
            num_classes=num_classes,
            bn_eps=bn_eps)

        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for all convolution weights; zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        y3, y2, y1 = self.encoder(x)
        x = self.decoder(y3, y2, y1)
        return (x, y3) if self.aux else x
def get_sinet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create SINet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SINet
        The constructed (and optionally pretrained) network.
    """
    # Per-stage, per-ESP-block, per-branch configurations.
    kernel_sizes_list = [
        [[3, 5], [3, 3], [3, 3]],
        [[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]]
    scale_factors_list = [
        [[1, 1], [0, 1], [0, 1]],
        [[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]]

    chnn = 4
    dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)]
    # dims == [24, 60, 84, 108]; previously dim2 was assigned but unused
    # (dims[1] was passed directly) -- use the named values consistently.
    dim1, dim2, dim3, dim4 = dims

    p = len(kernel_sizes_list[0])
    q = len(kernel_sizes_list[1])
    channels_list = [[dim2] * p, ([dim3] * (q // 2)) + ([dim4] * (q - q // 2))]
    # The first block at each channel count is non-residual (shape changes).
    use_residual_list = [[0] + ([1] * (p - 1)), [0] + ([1] * (q // 2 - 1)) + [0] + ([1] * (q - q // 2 - 1))]
    down_channels_list = [dim1, dim2]

    net = SINet(
        down_channels_list=down_channels_list,
        channels_list=channels_list,
        kernel_sizes_list=kernel_sizes_list,
        scale_factors_list=scale_factors_list,
        use_residual_list=use_residual_list,
        dim2=dim2,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def sinet_cityscapes(num_classes=19, **kwargs):
    """
    SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze
    Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sinet(
        num_classes=num_classes,
        bn_eps=1e-3,
        model_name="sinet_cityscapes",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test SINet: parameter count plus a forward pass at full size."""
    import torch

    in_size = (1024, 2048)
    aux = False
    fixed_size = True
    pretrained = False

    models = [
        sinet_cityscapes,
    ]

    for model in models:
        net = model(pretrained=pretrained, aux=aux, fixed_size=fixed_size)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        if model is sinet_cityscapes:
            assert (weight_count == 119418)

        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        ys = net(x)
        y = ys[0] if aux else ys
        assert (tuple(y.size()) == (batch, 19, in_size[0], in_size[1]))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 33,876
| 30.929312
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/shufflenetv2b.py
|
"""
ShuffleNet V2 for ImageNet-1K, implemented in PyTorch. The alternative version.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock
class ShuffleUnit(nn.Module):
    """
    ShuffleNetV2(b) unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    shuffle_group_first : bool
        Whether to use channel shuffle in group first mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 shuffle_group_first):
        super(ShuffleUnit, self).__init__()
        assert (in_channels % 2 == 0)
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        mid_channels = out_channels // 2
        # With downsampling the whole input feeds the main branch; otherwise
        # the input is split in half between the shortcut and main branches.
        y2_in_channels = in_channels if downsample else (in_channels // 2)
        y2_out_channels = out_channels - y2_in_channels

        # Main branch: 1x1 -> depthwise 3x3 -> 1x1, with optional SE.
        self.conv1 = conv1x1_block(
            in_channels=y2_in_channels,
            out_channels=mid_channels)
        self.dconv = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=(2 if downsample else 1),
            activation=None)
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=y2_out_channels)
        if use_se:
            self.se = SEBlock(channels=y2_out_channels)

        # A separate shortcut path is only needed when downsampling.
        if downsample:
            self.shortcut_dconv = dwconv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                stride=2,
                activation=None)
            self.shortcut_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=in_channels)

        shuffle_class = ChannelShuffle if shuffle_group_first else ChannelShuffle2
        self.c_shuffle = shuffle_class(
            channels=out_channels,
            groups=2)

    def forward(self, x):
        if self.downsample:
            y1 = self.shortcut_conv(self.shortcut_dconv(x))
            x2 = x
        else:
            y1, x2 = torch.chunk(x, chunks=2, dim=1)
        y2 = self.conv1(x2)
        y2 = self.dconv(y2)
        y2 = self.conv2(y2)
        if self.use_se:
            y2 = self.se(y2)
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        return self.c_shuffle(torch.cat((y1, y2), dim=1))
class ShuffleInitBlock(nn.Module):
    """
    ShuffleNetV2(b) specific initial block: strided 3x3 convolution followed
    by 3x3 max-pooling (4x overall spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ShuffleInitBlock, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1,
            ceil_mode=False)

    def forward(self, x):
        return self.pool(self.conv(x))
class ShuffleNetV2b(nn.Module):
    """
    ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 shuffle_group_first=True,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ShuffleNetV2b, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(j + 1), ShuffleUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=(j == 0),  # first unit of each stage downsamples
                    use_se=use_se,
                    use_residual=use_residual,
                    shuffle_group_first=shuffle_group_first))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=final_block_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for convolution weights; zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_shufflenetv2b(width_scale,
                      shuffle_group_first=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".torch", "models"),
                      **kwargs):
    """
    Create ShuffleNetV2(b) model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]

    # One sub-list of per-unit output channels per stage.
    channels = [layer_count * [layer_channels]
                for (layer_channels, layer_count) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        channels = [[int(c * width_scale) for c in stage] for stage in channels]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2b(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        shuffle_group_first=shuffle_group_first,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def shufflenetv2b_wd2(**kwargs):
    """
    ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    width_scale = 12.0 / 29.0  # approximately 0.5x of the base configuration
    return get_shufflenetv2b(width_scale=width_scale, shuffle_group_first=True,
                             model_name="shufflenetv2b_wd2", **kwargs)
def shufflenetv2b_w1(**kwargs):
    """
    ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # The base (unscaled) configuration.
    return get_shufflenetv2b(width_scale=1.0, shuffle_group_first=True,
                             model_name="shufflenetv2b_w1", **kwargs)
def shufflenetv2b_w3d2(**kwargs):
    """
    ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    width_scale = 44.0 / 29.0  # approximately 1.5x of the base configuration
    return get_shufflenetv2b(width_scale=width_scale, shuffle_group_first=True,
                             model_name="shufflenetv2b_w3d2", **kwargs)
def shufflenetv2b_w2(**kwargs):
    """
    ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    width_scale = 61.0 / 29.0  # approximately 2x of the base configuration
    return get_shufflenetv2b(width_scale=width_scale, shuffle_group_first=True,
                             model_name="shufflenetv2b_w2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all variants: parameter counts plus a forward/backward pass."""
    import torch

    pretrained = False

    models = [
        shufflenetv2b_wd2,
        shufflenetv2b_w1,
        shufflenetv2b_w3d2,
        shufflenetv2b_w2,
    ]

    # Reference trainable-parameter counts for each variant.
    expected_counts = {
        shufflenetv2b_wd2: 1366792,
        shufflenetv2b_w1: 2279760,
        shufflenetv2b_w3d2: 4410194,
        shufflenetv2b_w2: 7611290,
    }

    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,431
| 30.553299
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/sparsenet.py
|
"""
SparseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
"""
__all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264']
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
def sparsenet_exponential_fetch(lst):
    """
    SparseNet's specific exponential fetch: select the elements at offsets
    1, 2, 4, 8, ... counted from the end of the list.

    Parameters:
    ----------
    lst : list
        List of something.

    Returns:
    -------
    list
        Filtered list.
    """
    # For n >= 1, n.bit_length() == floor(log2(n)) + 1, computed exactly in
    # integer arithmetic (the previous math.log-based form was subject to
    # float rounding near large powers of two). An empty input now yields an
    # empty result instead of raising from math.log(0).
    return [lst[len(lst) - 2 ** i] for i in range(len(lst).bit_length())]
class SparseBlock(nn.Module):
    """
    SparseNet block: pre-activated 1x1 bottleneck followed by a pre-activated
    3x3 convolution, with optional dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate):
        super(SparseBlock, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4  # bottleneck widening factor (DenseNet convention)
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=(out_channels * bn_size))
        self.conv2 = pre_conv3x3_block(
            in_channels=(out_channels * bn_size),
            out_channels=out_channels)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        x = self.conv2(self.conv1(x))
        if self.use_dropout:
            x = self.dropout(x)
        return x
class SparseStage(nn.Module):
    """
    SparseNet stage: an optional transition block followed by sparse blocks
    whose inputs are sparse (exponentially fetched) aggregations of all
    previous outputs, rather than dense concatenations.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels_per_stage : list of int
        Number of output channels for each unit in stage.
    growth_rate : int
        Growth rate for blocks.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    do_transition : bool
        Whether use transition block.
    """
    def __init__(self,
                 in_channels,
                 channels_per_stage,
                 growth_rate,
                 dropout_rate,
                 do_transition):
        super(SparseStage, self).__init__()
        self.do_transition = do_transition
        if do_transition:
            # Halve the channel count before the blocks.
            self.trans = TransitionBlock(
                in_channels=in_channels,
                out_channels=(in_channels // 2))
            in_channels = in_channels // 2
        self.blocks = nn.Sequential()
        for i, out_channels in enumerate(channels_per_stage):
            self.blocks.add_module("block{}".format(i + 1), SparseBlock(
                in_channels=in_channels,
                out_channels=growth_rate,
                dropout_rate=dropout_rate))
            in_channels = out_channels

    def forward(self, x):
        if self.do_transition:
            x = self.trans(x)
        outs = [x]
        for block in self.blocks._modules.values():
            y = block(x)
            outs.append(y)
            # Each block consumes the exponentially fetched concatenation of
            # the outputs seen so far.
            x = torch.cat(tuple(sparsenet_exponential_fetch(outs)), dim=1)
        return x
class SparseNet(nn.Module):
    """
    SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    growth_rate : int
        Growth rate for blocks.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 growth_rate,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SparseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        # Pre-activation ResNet stem (PreResInitBlock is presumably imported
        # at module level; not visible in this chunk).
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # Every stage except the first starts with a transition block.
            stage = SparseStage(
                in_channels=in_channels,
                channels_per_stage=channels_per_stage,
                growth_rate=growth_rate,
                dropout_rate=dropout_rate,
                do_transition=(i != 0))
            in_channels = channels_per_stage[-1]
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # NOTE(review): fixed 7x7 average pooling assumes a 224x224 input.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolution weights.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_sparsenet(num_layers,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create SparseNet model with specific parameters.

    Parameters:
    ----------
    num_layers : int
        Number of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The constructed network.

    Raises:
    ------
    ValueError
        If `num_layers` is unsupported, or `pretrained` is set without a
        valid `model_name`.
    """
    # DenseNet-style configurations (stem width, growth rate, units/stage).
    if num_layers == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif num_layers == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif num_layers == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif num_layers == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    elif num_layers == 264:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 64, 48]
    else:
        raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers))
    from functools import reduce
    # Per-unit channel bookkeeping. The inner reduce appears to walk the
    # units of one stage and, for each unit, record the width of the
    # exponentially fetched concatenation (stage input plus growth_rate-wide
    # block outputs) — mirroring SparseStage.forward; the outer reduce chains
    # stages, halving the carried width at each transition (xi[-1][-1] // 2).
    channels = reduce(
        lambda xi, yi: xi + [reduce(
            lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))],
            zip([growth_rate] * yi, range(yi)),
            [xi[-1][-1] // 2])[1:]],
        layers,
        [[init_block_channels * 2]])[1:]
    net = SparseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        growth_rate=growth_rate,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def sparsenet121(**kwargs):
    """
    SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The requested network instance.
    """
    return get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs)


def sparsenet161(**kwargs):
    """
    SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The requested network instance.
    """
    return get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs)


def sparsenet169(**kwargs):
    """
    SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The requested network instance.
    """
    return get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs)


def sparsenet201(**kwargs):
    """
    SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The requested network instance.
    """
    return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)


def sparsenet264(**kwargs):
    """
    SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SparseNet
        The requested network instance.
    """
    return get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all SparseNet variants: parameter counts and output shape."""
    import torch
    pretrained = False
    models = [
        sparsenet121,
        sparsenet161,
        sparsenet169,
        sparsenet201,
        sparsenet264,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != sparsenet121 or weight_count == 3250824)
        assert (model != sparsenet161 or weight_count == 9853288)
        assert (model != sparsenet169 or weight_count == 4709864)
        assert (model != sparsenet201 or weight_count == 5703144)
        assert (model != sparsenet264 or weight_count == 7717224)
        # One forward/backward pass on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 11,646
| 29.569554
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/menet.py
|
"""
MENet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle
class MEUnit(nn.Module):
    """
    MENet unit: a ShuffleNet-style residual unit augmented with a narrow
    "fusion" side branch that merges, evolves and sigmoid-gates the main
    branch (see 'Merging and Evolution', https://arxiv.org/abs/1803.09127).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    side_channels : int
        Number of side channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 side_channels,
                 groups,
                 downsample,
                 ignore_group):
        super(MEUnit, self).__init__()
        self.downsample = downsample
        mid_channels = out_channels // 4
        if downsample:
            # The identity is concatenated back on downsample, so the residual
            # branch only has to produce the remaining channels.
            out_channels -= in_channels
        # residual branch
        self.compress_conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=(1 if ignore_group else groups))
        self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels)
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups)
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            stride=(2 if self.downsample else 1))
        self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels)
        self.expand_conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups)
        self.expand_bn3 = nn.BatchNorm2d(num_features=out_channels)
        if downsample:
            self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.activ = nn.ReLU(inplace=True)
        # fusion branch
        self.s_merge_conv = conv1x1(
            in_channels=mid_channels,
            out_channels=side_channels)
        self.s_merge_bn = nn.BatchNorm2d(num_features=side_channels)
        self.s_conv = conv3x3(
            in_channels=side_channels,
            out_channels=side_channels,
            stride=(2 if self.downsample else 1))
        self.s_conv_bn = nn.BatchNorm2d(num_features=side_channels)
        self.s_evolve_conv = conv1x1(
            in_channels=side_channels,
            out_channels=mid_channels)
        self.s_evolve_bn = nn.BatchNorm2d(num_features=mid_channels)

    def forward(self, x):
        identity = x
        # pointwise group convolution 1
        x = self.compress_conv1(x)
        x = self.compress_bn1(x)
        x = self.activ(x)
        x = self.c_shuffle(x)
        # merging: compress the main branch into the narrow side branch
        y = self.s_merge_conv(x)
        y = self.s_merge_bn(y)
        y = self.activ(y)
        # depthwise convolution (bottleneck)
        x = self.dw_conv2(x)
        x = self.dw_bn2(x)
        # evolution: transform the side branch, then use it as a sigmoid gate
        # on the depthwise output
        y = self.s_conv(y)
        y = self.s_conv_bn(y)
        y = self.activ(y)
        y = self.s_evolve_conv(y)
        y = self.s_evolve_bn(y)
        y = torch.sigmoid(y)
        x = x * y
        # pointwise group convolution 2
        x = self.expand_conv3(x)
        x = self.expand_bn3(x)
        # identity branch: concatenation on downsample, addition otherwise
        if self.downsample:
            identity = self.avgpool(identity)
            x = torch.cat((x, identity), dim=1)
        else:
            x = x + identity
        x = self.activ(x)
        return x
class MEInitBlock(nn.Module):
    """
    MENet specific initial block: a 3x3 stride-2 convolution with batch
    normalization and ReLU, followed by 3x3 stride-2 max-pooling (4x spatial
    downsampling overall).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(MEInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.activ(self.bn(self.conv(x))))
class MENet(nn.Module):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MENet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", MEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of each stage downsamples; the very first
                # unit of the network also ignores grouping in its 1x1 conv.
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                stage.add_module("unit{}".format(j + 1), MEUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    side_channels=side_channels,
                    groups=groups,
                    downsample=downsample,
                    ignore_group=ignore_group))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # NOTE(review): fixed 7x7 average pooling assumes a 224x224 input.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolution weights.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create MENet model with specific parameters.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The constructed network.
    """
    layers = [4, 8, 4]
    # Stem width for every supported first-stage width. The per-stage widths
    # always follow the doubling pattern [w, 2w, 4w].
    init_block_channels_map = {
        108: 12,
        128: 12,
        160: 16,
        228: 24,
        256: 24,
        348: 24,
        352: 24,
        456: 48,
    }
    if first_stage_channels not in init_block_channels_map:
        raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
    init_block_channels = init_block_channels_map[first_stage_channels]
    channels_per_layers = [first_stage_channels * (2 ** k) for k in range(len(layers))]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = MENet(
        channels=channels,
        init_block_channels=init_block_channels,
        side_channels=side_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def menet108_8x1_g3(**kwargs):
    """
    108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs)


def menet128_8x1_g4(**kwargs):
    """
    128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs)


def menet160_8x1_g8(**kwargs):
    """
    160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs)


def menet228_12x1_g3(**kwargs):
    """
    228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs)


def menet256_12x1_g4(**kwargs):
    """
    256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs)


def menet348_12x1_g3(**kwargs):
    """
    348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs)


def menet352_12x1_g8(**kwargs):
    """
    352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs)


def menet456_24x1_g3(**kwargs):
    """
    456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MENet
        The requested network instance.
    """
    return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all MENet variants: parameter counts and output shape."""
    import torch
    pretrained = False
    models = [
        menet108_8x1_g3,
        menet128_8x1_g4,
        menet160_8x1_g8,
        menet228_12x1_g3,
        menet256_12x1_g4,
        menet348_12x1_g3,
        menet352_12x1_g8,
        menet456_24x1_g3,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != menet108_8x1_g3 or weight_count == 654516)
        assert (model != menet128_8x1_g4 or weight_count == 750796)
        assert (model != menet160_8x1_g8 or weight_count == 850120)
        assert (model != menet228_12x1_g3 or weight_count == 1806568)
        assert (model != menet256_12x1_g4 or weight_count == 1888240)
        assert (model != menet348_12x1_g3 or weight_count == 3368128)
        assert (model != menet352_12x1_g8 or weight_count == 2272872)
        assert (model != menet456_24x1_g3 or weight_count == 5304784)
        # One forward/backward pass on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 15,917
| 31.956522
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/voca.py
|
"""
VOCA for speech-driven facial animation, implemented in PyTorch.
Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.
"""
__all__ = ['VOCA', 'voca8flame']
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import ConvBlock
class VocaEncoder(nn.Module):
    """
    VOCA encoder: maps a window of audio features plus a one-hot speaker
    identity to a low-dimensional expression code.

    Parameters:
    ----------
    audio_features : int
        Number of audio features (characters/sounds).
    audio_window_size : int
        Size of audio window (for time related audio features).
    base_persons : int
        Number of base persons (subjects).
    encoder_features : int
        Number of encoder features.
    """
    def __init__(self,
                 audio_features,
                 audio_window_size,
                 base_persons,
                 encoder_features):
        super(VocaEncoder, self).__init__()
        self.audio_window_size = audio_window_size
        channels = (32, 32, 64, 64)
        fc1_channels = 128
        # Normalizes the single input channel of the raw audio map.
        self.bn = nn.BatchNorm2d(num_features=1)
        in_channels = audio_features + base_persons
        self.branch = nn.Sequential()
        for i, out_channels in enumerate(channels):
            # (3, 1) kernels with (2, 1) stride: each conv halves the time
            # axis only (e.g. a window of 16 collapses to 1 after four convs).
            self.branch.add_module("conv{}".format(i + 1), ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 1),
                stride=(2, 1),
                padding=(1, 0),
                bias=True,
                use_bn=False))
            in_channels = out_channels
        # The speaker one-hot vector is concatenated again before the FC head.
        in_channels += base_persons
        self.fc1 = nn.Linear(
            in_features=in_channels,
            out_features=fc1_channels)
        self.fc2 = nn.Linear(
            in_features=fc1_channels,
            out_features=encoder_features)

    def forward(self, x, pid):
        # x: audio window, assumed (batch, 1, audio_window_size,
        # audio_features) — TODO confirm against caller;
        # pid: one-hot identity, (batch, base_persons).
        x = self.bn(x)
        # Move the feature axis into the channel position:
        # (batch, 1, window, features) -> (batch, features, window, 1).
        x = x.transpose(1, 3).contiguous()
        # Broadcast the speaker identity over the time axis and append it as
        # extra channels before the convolutional branch.
        y = pid.unsqueeze(-1).unsqueeze(-1)
        y = y.repeat(1, 1, self.audio_window_size, 1)
        x = torch.cat((x, y), dim=1)
        x = self.branch(x)
        x = x.view(x.size(0), -1)
        # Re-attach the identity for the fully-connected head.
        x = torch.cat((x, pid), dim=1)
        x = self.fc1(x)
        x = x.tanh()
        x = self.fc2(x)
        return x
class VOCA(nn.Module):
    """
    VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.

    Encodes an audio window (conditioned on speaker identity) and linearly
    decodes the code into per-vertex 3D coordinates of a face mesh.

    Parameters:
    ----------
    audio_features : int, default 29
        Number of audio features (characters/sounds).
    audio_window_size : int, default 16
        Size of audio window (for time related audio features).
    base_persons : int, default 8
        Number of base persons (subjects).
    encoder_features : int, default 50
        Number of encoder features.
    vertices : int, default 5023
        Number of 3D geometry vertices.
    """
    def __init__(self,
                 audio_features=29,
                 audio_window_size=16,
                 base_persons=8,
                 encoder_features=50,
                 vertices=5023):
        super(VOCA, self).__init__()
        self.base_persons = base_persons
        self.encoder = VocaEncoder(
            audio_features=audio_features,
            audio_window_size=audio_window_size,
            base_persons=base_persons,
            encoder_features=encoder_features)
        # Linear decoder: expression code -> (x, y, z) per vertex.
        self.decoder = nn.Linear(
            in_features=encoder_features,
            out_features=(3 * vertices))
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for convolution weights.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x, pid):
        # pid: integer subject ids -> one-hot (batch, base_persons) in the
        # same dtype as the incoming pid tensor.
        pid = F.one_hot(pid.long(), num_classes=self.base_persons).type_as(pid)
        x = self.encoder(x, pid)
        x = self.decoder(x)
        # Reshape the flat decoder output to (batch, 1, vertices, 3).
        x = x.view(x.size(0), 1, -1, 3)
        return x
def get_voca(base_persons,
             vertices,
             model_name=None,
             pretrained=False,
             root=os.path.join("~", ".torch", "models"),
             **kwargs):
    """
    Create VOCA model with specific parameters.

    Parameters:
    ----------
    base_persons : int
        Number of base persons (subjects).
    vertices : int
        Number of 3D geometry vertices.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VOCA
        The constructed network.
    """
    net = VOCA(
        base_persons=base_persons,
        vertices=vertices,
        **kwargs)
    if not pretrained:
        return net
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import download_model
    download_model(
        net=net,
        model_name=model_name,
        local_model_store_dir_path=root)
    return net
def voca8flame(**kwargs):
    """
    VOCA-8-FLAME model for 8 base persons and FLAME topology from 'Capture, Learning, and Synthesis of 3D Speaking
    Styles,' https://arxiv.org/abs/1905.03079.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VOCA
        The requested network instance.
    """
    return get_voca(base_persons=8, vertices=5023, model_name="voca8flame", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test VOCA: parameter count and output mesh shape."""
    import torch
    pretrained = False
    models = [
        voca8flame,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != voca8flame or weight_count == 809563)
        batch = 14
        audio_features = 29
        audio_window_size = 16
        vertices = 5023
        x = torch.randn(batch, 1, audio_window_size, audio_features)
        # NOTE(review): without an explicit dtype=, recent PyTorch infers an
        # integer tensor here, and VOCA.forward casts the one-hot to pid's
        # dtype before concatenating with float features — verify this runs
        # on the targeted torch version (fill_value=3.0 would force float).
        pid = torch.full(size=(batch,), fill_value=3)
        y = net(x, pid)
        # y.sum().backward()
        assert (y.shape == (batch, 1, vertices, 3))


if __name__ == "__main__":
    _test()
| 6,683
| 28.575221
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/shakeshakeresnet_cifar.py
|
"""
Shake-Shake-ResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
"""
__all__ = ['CIFARShakeShakeResNet', 'shakeshakeresnet20_2x16d_cifar10', 'shakeshakeresnet20_2x16d_cifar100',
'shakeshakeresnet20_2x16d_svhn', 'shakeshakeresnet26_2x32d_cifar10', 'shakeshakeresnet26_2x32d_cifar100',
'shakeshakeresnet26_2x32d_svhn']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeShake(torch.autograd.Function):
    """
    Shake-Shake function: the forward pass mixes two branch outputs with the
    supplied coefficient `alpha`, while the backward pass redistributes the
    incoming gradient with an independently drawn random coefficient `beta`.
    """
    @staticmethod
    def forward(ctx, x1, x2, alpha):
        return x1 * alpha + x2 * (1 - alpha)

    @staticmethod
    def backward(ctx, dy):
        # One random coefficient per sample, broadcast over C/H/W.
        beta = torch.rand(dy.size(0), dtype=dy.dtype, device=dy.device).view(-1, 1, 1, 1)
        # No gradient for alpha (third forward argument).
        return dy * beta, dy * (1 - beta), None
class ShakeShakeShortcut(nn.Module):
    """
    Shake-Shake-ResNet shortcut: downsamples and widens the identity path by
    concatenating two strided 1x1 projections — one over the input as-is and
    one over the input shifted by a pixel, so complementary spatial positions
    are sampled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(ShakeShakeShortcut, self).__init__()
        # Each of the two paths contributes half the output channels.
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2
        self.pool = nn.AvgPool2d(
            kernel_size=1,
            stride=stride)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0))

    def forward(self, x):
        # Path 1: strided 1x1 sub-sampling, then projection to half width.
        x1 = self.pool(x)
        x1 = self.conv1(x1)
        # Path 2: shift the input one pixel (crop last row/column, zero-pad
        # top/left) so the strided pooling samples the complementary grid.
        x2 = x[:, :, :-1, :-1].contiguous()
        x2 = self.pad(x2)
        x2 = self.pool(x2)
        x2 = self.conv2(x2)
        # Concatenate both halves and normalize.
        x = torch.cat((x1, x2), dim=1)
        x = self.bn(x)
        return x
class ShakeShakeResUnit(nn.Module):
    """
    Shake-Shake-ResNet unit with residual connection: two parallel residual
    branches are mixed with a random per-sample coefficient during training
    and averaged with equal weights at evaluation time.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck):
        super(ShakeShakeResUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        branch_class = ResBottleneck if bottleneck else ResBlock
        self.branch1 = branch_class(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        self.branch2 = branch_class(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        if self.resize_identity:
            # Shape-matching shortcut when channels or resolution change.
            self.identity_branch = ShakeShakeShortcut(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        self.activ = nn.ReLU(inplace=True)
        self.shake_shake = ShakeShake.apply

    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_branch(x)
        else:
            identity = x
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        if self.training:
            # Per-sample mixing coefficient in [0, 1); the backward pass draws
            # an independent coefficient (see ShakeShake.backward).
            alpha = torch.rand(x1.size(0), dtype=x1.dtype, device=x1.device).view(-1, 1, 1, 1)
            x = self.shake_shake(x1, x2, alpha)
        else:
            # Deterministic expectation of the random mix at inference.
            x = 0.5 * (x1 + x2)
        x = x + identity
        x = self.activ(x)
        return x
class CIFARShakeShakeResNet(nn.Module):
    """
    Shake-Shake-ResNet model for CIFAR from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARShakeShakeResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage but the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), ShakeShakeResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # NOTE(review): fixed 8x8 average pooling assumes a 32x32 input.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolution weights.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_shakeshakeresnet_cifar(classes,
                               blocks,
                               bottleneck,
                               first_stage_channels=16,
                               model_name=None,
                               pretrained=False,
                               root=os.path.join("~", ".torch", "models"),
                               **kwargs):
    """
    Create Shake-Shake-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    first_stage_channels : int, default 16
        Number of output channels for the first stage.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    CIFARShakeShakeResNet
        The constructed network.
    """
    assert (classes in [10, 100])
    # Standard CIFAR-ResNet depth formula: three stages of equal length.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16
    # Channel width doubles at each stage: [c, 2c, 4c]. (Replaces an opaque
    # functools.reduce that built the same geometric sequence.)
    channels_per_layers = [first_stage_channels * (2 ** i) for i in range(len(layers))]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units quadruple the output width of every unit.
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = CIFARShakeShakeResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=classes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def shakeshakeresnet20_2x16d_cifar10(classes=10, **kwargs):
    """
    Shake-Shake-ResNet-20-2x16d model for CIFAR-10 ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_cifar10",
        **kwargs)
def shakeshakeresnet20_2x16d_cifar100(classes=100, **kwargs):
    """
    Shake-Shake-ResNet-20-2x16d model for CIFAR-100 ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_cifar100",
        **kwargs)
def shakeshakeresnet20_2x16d_svhn(classes=10, **kwargs):
    """
    Shake-Shake-ResNet-20-2x16d model for SVHN ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_svhn",
        **kwargs)
def shakeshakeresnet26_2x32d_cifar10(classes=10, **kwargs):
    """
    Shake-Shake-ResNet-26-2x32d model for CIFAR-10 ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_cifar10",
        **kwargs)
def shakeshakeresnet26_2x32d_cifar100(classes=100, **kwargs):
    """
    Shake-Shake-ResNet-26-2x32d model for CIFAR-100 ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_cifar100",
        **kwargs)
def shakeshakeresnet26_2x32d_svhn(classes=10, **kwargs):
    """
    Shake-Shake-ResNet-26-2x32d model for SVHN ('Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_svhn",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test the Shake-Shake-ResNet models: parameter counts, forward, backward."""
    import torch
    pretrained = False
    # Known trainable-parameter counts per model factory.
    expected = {
        shakeshakeresnet20_2x16d_cifar10: 541082,
        shakeshakeresnet20_2x16d_cifar100: 546932,
        shakeshakeresnet20_2x16d_svhn: 541082,
        shakeshakeresnet26_2x32d_cifar10: 2923162,
        shakeshakeresnet26_2x32d_cifar100: 2934772,
        shakeshakeresnet26_2x32d_svhn: 2923162,
    }
    models = [
        (shakeshakeresnet20_2x16d_cifar10, 10),
        (shakeshakeresnet20_2x16d_cifar100, 100),
        (shakeshakeresnet20_2x16d_svhn, 10),
        (shakeshakeresnet26_2x32d_cifar10, 10),
        (shakeshakeresnet26_2x32d_cifar100, 100),
        (shakeshakeresnet26_2x32d_svhn, 10),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected[model])
        x = torch.randn(14, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (14, num_classes))
| 14,392
| 33.269048
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/sqnet.py
|
"""
SQNet for image segmentation, implemented in PyTorch.
Original paper: 'Speeding up Semantic Segmentation for Autonomous Driving,'
https://openreview.net/pdf?id=S1uHiFyyg.
"""
__all__ = ['SQNet', 'sqnet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, deconv3x3_block, Concurrent, Hourglass
class FireBlock(nn.Module):
    """
    SQNet encoder "fire" unit: a 1x1 squeeze convolution followed by parallel
    1x1/3x3 expand branches whose outputs are concatenated and passed through ELU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias,
                 use_bn,
                 activation):
        super(FireBlock, self).__init__()
        squeeze_channels = out_channels // 8
        expand_channels = out_channels // 2
        shared = dict(bias=bias, use_bn=use_bn)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=squeeze_channels,
            activation=activation,
            **shared)
        # Two expand branches over the squeezed tensor, fused by channel concat.
        self.branches = Concurrent(merge_type="cat")
        self.branches.add_module("branch1", conv1x1_block(
            in_channels=squeeze_channels,
            out_channels=expand_channels,
            activation=None,
            **shared))
        self.branches.add_module("branch2", conv3x3_block(
            in_channels=squeeze_channels,
            out_channels=expand_channels,
            activation=None,
            **shared))
        self.activ = nn.ELU(inplace=True)

    def forward(self, x):
        squeezed = self.conv(x)
        expanded = self.branches(squeezed)
        return self.activ(expanded)
class ParallelDilatedConv(nn.Module):
    """
    SQNet decoder block: four parallel 3x3 convolutions with dilation rates
    1..4, summed elementwise to enlarge the receptive field.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias,
                 use_bn,
                 activation):
        super(ParallelDilatedConv, self).__init__()
        self.branches = Concurrent(merge_type="sum")
        # padding == dilation keeps the spatial size unchanged for 3x3 kernels.
        for rate in (1, 2, 3, 4):
            self.branches.add_module("branch{}".format(rate), conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                padding=rate,
                dilation=rate,
                bias=bias,
                use_bn=use_bn,
                activation=activation))

    def forward(self, x):
        return self.branches(x)
class SQNetUpStage(nn.Module):
    """
    SQNet upscale stage: a channel-preserving conv (optionally the parallel
    dilated variant) followed by a stride-2 deconvolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    use_parallel_conv : bool
        Whether to use parallel dilated convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias,
                 use_bn,
                 activation,
                 use_parallel_conv):
        super(SQNetUpStage, self).__init__()
        conv_kwargs = dict(
            in_channels=in_channels,
            out_channels=in_channels,
            bias=bias,
            use_bn=use_bn,
            activation=activation)
        if use_parallel_conv:
            self.conv = ParallelDilatedConv(**conv_kwargs)
        else:
            self.conv = conv3x3_block(**conv_kwargs)
        # Stride-2 transposed conv doubles the spatial resolution.
        self.deconv = deconv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            bias=bias,
            use_bn=use_bn,
            activation=activation)

    def forward(self, x):
        return self.deconv(self.conv(x))
class SQNet(nn.Module):
    """
    SQNet model from 'Speeding up Semantic Segmentation for Autonomous Driving,'
    https://openreview.net/pdf?id=S1uHiFyyg.

    Encoder-decoder ("hourglass") segmentation network: fire-block encoder with
    skip connections concatenated into a deconvolutional decoder.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each stage in encoder and decoder.
    init_block_channels : int
        Number of output channels for the initial unit.
    layers : list of int
        Number of layers for each stage in encoder.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 layers,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(SQNet, self).__init__()
        assert (aux is not None)
        assert (fixed_size is not None)
        # Three 2x downsamplings inside the hourglass require /8-divisible sizes.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size
        # SQNet uses biased convolutions without BatchNorm and ELU activations.
        bias = True
        use_bn = False
        activation = (lambda: nn.ELU(inplace=True))
        self.stem = conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            bias=bias,
            use_bn=use_bn,
            activation=activation)
        in_channels = init_block_channels
        down_seq = nn.Sequential()
        skip_seq = nn.Sequential()
        for i, out_channels in enumerate(channels[0]):
            # Skip branch preserves the pre-downsampling features for the decoder.
            skip_seq.add_module("skip{}".format(i + 1), conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bias=bias,
                use_bn=use_bn,
                activation=activation))
            stage = nn.Sequential()
            stage.add_module("unit1", nn.MaxPool2d(
                kernel_size=2,
                stride=2))
            for j in range(layers[i]):
                stage.add_module("unit{}".format(j + 2), FireBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bias=bias,
                    use_bn=use_bn,
                    activation=activation))
                in_channels = out_channels
            down_seq.add_module("down{}".format(i + 1), stage)
        # Decoder stages consume (skip + upsampled) = 2 * in_channels inputs.
        in_channels = in_channels // 2
        up_seq = nn.Sequential()
        for i, out_channels in enumerate(channels[1]):
            # Only the deepest decoder stage uses the parallel dilated conv.
            use_parallel_conv = (i == 0)
            up_seq.add_module("up{}".format(i + 1), SQNetUpStage(
                in_channels=(2 * in_channels),
                out_channels=out_channels,
                bias=bias,
                use_bn=use_bn,
                activation=activation,
                use_parallel_conv=use_parallel_conv))
            in_channels = out_channels
        # Hourglass applies up stages deepest-first, so reverse the sequence.
        up_seq = up_seq[::-1]
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq,
            merge_type="cat")
        # Head input is doubled by the final skip concatenation.
        self.head = SQNetUpStage(
            in_channels=(2 * in_channels),
            out_channels=num_classes,
            bias=bias,
            use_bn=use_bn,
            activation=activation,
            use_parallel_conv=False)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero conv biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return per-pixel class scores at the input resolution."""
        x = self.stem(x)
        x = self.hg(x)
        x = self.head(x)
        return x
def get_sqnet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create SQNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed SQNet configuration: encoder widths, decoder widths, stem width,
    # and fire-block counts per encoder stage.
    net = SQNet(
        channels=[[128, 256, 512], [256, 128, 96]],
        init_block_channels=96,
        layers=[2, 2, 3],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def sqnet_cityscapes(num_classes=19, **kwargs):
    """
    SQNet model for Cityscapes ('Speeding up Semantic Segmentation for
    Autonomous Driving,' https://openreview.net/pdf?id=S1uHiFyyg).

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sqnet(
        num_classes=num_classes,
        model_name="sqnet_cityscapes",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test SQNet on a Cityscapes-sized input."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19
    for model in [sqnet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == 16262771)
        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # Output keeps the input spatial resolution with one score map per class.
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))
| 11,602
| 29.374346
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/wrn_cifar.py
|
"""
WRN for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10',
'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARWRN(nn.Module):
    """
    WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    A pre-activation ResNet whose per-stage widths are widened by a factor;
    classification is a single linear layer over globally pooled features.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit (one inner list per stage).
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARWRN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        # Stem is a bare 3x3 conv; BN+ReLU live inside the pre-activated units.
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=False,
                    conv1_stride=False))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Final BN+ReLU required by pre-activation residual networks.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # 8x8 average pool matches a 32x32 input downsampled twice.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero conv biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        """Return class logits for a batch of images."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_wrn_cifar(num_classes,
                  blocks,
                  width_factor,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create WRN model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Three stages, each with (blocks - 4) / 6 two-conv basic units.
    assert ((blocks - 4) % 6 == 0)
    depth = (blocks - 4) // 6
    base_widths = [16, 32, 64]
    init_block_channels = 16
    # Widen every stage by the WRN width factor.
    channels = [[base * width_factor] * depth for base in base_widths]
    net = CIFARWRN(
        channels=channels,
        init_block_channels=init_block_channels,
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def wrn16_10_cifar10(num_classes=10, **kwargs):
    """
    WRN-16-10 model for CIFAR-10 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar10",
        **kwargs)
def wrn16_10_cifar100(num_classes=100, **kwargs):
    """
    WRN-16-10 model for CIFAR-100 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar100",
        **kwargs)
def wrn16_10_svhn(num_classes=10, **kwargs):
    """
    WRN-16-10 model for SVHN ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_svhn",
        **kwargs)
def wrn28_10_cifar10(num_classes=10, **kwargs):
    """
    WRN-28-10 model for CIFAR-10 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar10",
        **kwargs)
def wrn28_10_cifar100(num_classes=100, **kwargs):
    """
    WRN-28-10 model for CIFAR-100 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar100",
        **kwargs)
def wrn28_10_svhn(num_classes=10, **kwargs):
    """
    WRN-28-10 model for SVHN ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_svhn",
        **kwargs)
def wrn40_8_cifar10(num_classes=10, **kwargs):
    """
    WRN-40-8 model for CIFAR-10 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar10",
        **kwargs)
def wrn40_8_cifar100(num_classes=100, **kwargs):
    """
    WRN-40-8 model for CIFAR-100 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar100",
        **kwargs)
def wrn40_8_svhn(num_classes=10, **kwargs):
    """
    WRN-40-8 model for SVHN ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_svhn",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test the WRN models: parameter counts, forward, backward."""
    import torch
    pretrained = False
    # Known trainable-parameter counts per model factory.
    expected = {
        wrn16_10_cifar10: 17116634,
        wrn16_10_cifar100: 17174324,
        wrn16_10_svhn: 17116634,
        wrn28_10_cifar10: 36479194,
        wrn28_10_cifar100: 36536884,
        wrn28_10_svhn: 36479194,
        wrn40_8_cifar10: 35748314,
        wrn40_8_cifar100: 35794484,
        wrn40_8_svhn: 35748314,
    }
    models = [
        (wrn16_10_cifar10, 10),
        (wrn16_10_cifar100, 100),
        (wrn16_10_svhn, 10),
        (wrn28_10_cifar10, 10),
        (wrn28_10_cifar100, 100),
        (wrn28_10_svhn, 10),
        (wrn40_8_cifar10, 10),
        (wrn40_8_cifar100, 100),
        (wrn40_8_svhn, 10),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected[model])
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
| 11,329
| 33.126506
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/inceptionresnetv2.py
|
"""
InceptionResNetV2 for ImageNet-1K, implemented in PyTorch.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV2', 'inceptionresnetv2']
import os
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, Concurrent
from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit
class InceptBlock5b(nn.Module):
    """
    InceptionResNetV2 type Mixed-5b block: four parallel branches (1x1, 1x1-5x5,
    1x1-3x3-3x3, avgpool-1x1) concatenated over the channel axis.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptBlock5b, self).__init__()
        # The block always follows the 192-channel stem.
        in_channels = 192
        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps))
        # Two sequential-conv branches: (1x1 -> 5x5) and (1x1 -> 3x3 -> 3x3).
        branch_specs = [
            ((48, 64), (1, 5), (1, 1), (0, 2)),
            ((64, 96, 96), (1, 3, 3), (1, 1, 1), (0, 1, 1)),
        ]
        for idx, (ocs, ks, sts, pads) in enumerate(branch_specs, start=2):
            self.branches.add_module("branch{}".format(idx), ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=ocs,
                kernel_size_list=ks,
                strides_list=sts,
                padding_list=pads,
                bn_eps=bn_eps))
        self.branches.add_module("branch4", AvgPoolBranch(
            in_channels=in_channels,
            out_channels=64,
            bn_eps=bn_eps,
            count_include_pad=False))

    def forward(self, x):
        return self.branches(x)
class InceptInitBlock(nn.Module):
    """
    InceptionResNetV2 stem: five convolutions interleaved with two max-pools,
    finished by the Mixed-5b block (output has 320 channels).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(InceptInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels, out_channels=32, stride=2, padding=0, bn_eps=bn_eps)
        self.conv2 = conv3x3_block(
            in_channels=32, out_channels=32, stride=1, padding=0, bn_eps=bn_eps)
        self.conv3 = conv3x3_block(
            in_channels=32, out_channels=64, stride=1, padding=1, bn_eps=bn_eps)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
        self.conv4 = conv1x1_block(
            in_channels=64, out_channels=80, stride=1, padding=0, bn_eps=bn_eps)
        self.conv5 = conv3x3_block(
            in_channels=80, out_channels=192, stride=1, padding=0, bn_eps=bn_eps)
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
        self.block = InceptBlock5b(bn_eps=bn_eps)

    def forward(self, x):
        # Plain pipeline: each stage feeds the next.
        for stage in (self.conv1, self.conv2, self.conv3, self.pool1,
                      self.conv4, self.conv5, self.pool2, self.block):
            x = stage(x)
        return x
class InceptionResNetV2(nn.Module):
    """
    InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 num_classes=1000):
        super(InceptionResNetV2, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        layers = [10, 21, 11]
        in_channels_list = [320, 1088, 2080]
        normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]]
        reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        self.features = nn.Sequential()
        self.features.add_module("init_block", InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps))
        in_channels = in_channels_list[0]
        for i, layers_per_stage in enumerate(layers):
            stage = nn.Sequential()
            for j in range(layers_per_stage):
                # Every stage after the first starts with a reduction unit.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                    out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                else:
                    unit = normal_units[i]
                    out_channels_list_per_stage = normal_out_channels_list[i]
                # The very last residual unit is unscaled and not activated.
                if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                    unit_kwargs = {"scale": 1.0, "activate": False}
                else:
                    unit_kwargs = {}
                stage.add_module("unit{}".format(j + 1), unit(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list_per_stage,
                    bn_eps=bn_eps,
                    **unit_kwargs))
                if (j == 0) and (i != 0):
                    in_channels = in_channels_list[i]
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_conv", conv1x1_block(
            in_channels=in_channels,
            out_channels=1536,
            bn_eps=bn_eps))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Sequential()
        if dropout_rate > 0.0:
            self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=1536,
            out_features=num_classes))
        self._init_params()
    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero conv biases."""
        # Bug fix: `named_modules()` yields (name, module) tuples. The previous
        # code iterated the tuples directly, so the isinstance check against
        # nn.Conv2d was always False and no weight was ever initialized.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        """Return class logits for a batch of images."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_inceptionresnetv2(model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".torch", "models"),
                          **kwargs):
    """
    Create InceptionResNetV2 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV2(**kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def inceptionresnetv2(**kwargs):
    """
    InceptionResNetV2 model ('Inception-v4, Inception-ResNet and the Impact of
    Residual Connections on Learning,' https://arxiv.org/abs/1602.07261).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_inceptionresnetv2(
        model_name="inceptionresnetv2",
        bn_eps=1e-3,
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test InceptionResNetV2: parameter count, forward, backward."""
    import torch
    pretrained = False
    for model in [inceptionresnetv2]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == 55843464)
        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 9,577
| 30.926667
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/ghostnet.py
|
"""
GhostNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
"""
__all__ = ['GhostNet', 'ghostnet']
import os
import math
import torch
import torch.nn as nn
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
dwsconv3x3_block, SEBlock
class GhostHSigmoid(nn.Module):
    """
    GhostNet's approximated ("hard") sigmoid: clips the input to [0, 1].
    """
    def forward(self, x):
        return x.clamp(min=0.0, max=1.0)
class GhostConvBlock(nn.Module):
    """
    GhostNet convolution block: a 1x1 "primary" conv produces roughly half of
    the output channels; the remainder is generated cheaply by a depthwise 3x3
    conv over the primary output, and the two halves are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(GhostConvBlock, self).__init__()
        # Ceil keeps the primary half at least as wide as the cheap half.
        main_out_channels = math.ceil(0.5 * out_channels)
        cheap_out_channels = out_channels - main_out_channels
        self.main_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=main_out_channels,
            activation=activation)
        self.cheap_conv = dwconv3x3_block(
            in_channels=main_out_channels,
            out_channels=cheap_out_channels,
            activation=activation)

    def forward(self, x):
        main = self.main_conv(x)
        cheap = self.cheap_conv(main)
        return torch.cat((main, cheap), dim=1)
class GhostExpBlock(nn.Module):
    """
    GhostNet expansion block for residual path in GhostNet unit: ghost expansion,
    optional depthwise downsampling, optional SE, ghost projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 use_kernel3,
                 exp_factor,
                 use_se):
        super(GhostExpBlock, self).__init__()
        self.use_dw_conv = (stride != 1)
        self.use_se = use_se

        mid_channels = int(math.ceil(exp_factor * in_channels))
        self.exp_conv = GhostConvBlock(
            in_channels=in_channels,
            out_channels=mid_channels)
        if self.use_dw_conv:
            # Spatial downsampling via a depthwise convolution without activation.
            dw_block = dwconv3x3_block if use_kernel3 else dwconv5x5_block
            self.dw_conv = dw_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                activation=None)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=4,
                out_activation=GhostHSigmoid())
        self.pw_conv = GhostConvBlock(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        x = self.exp_conv(x)
        if self.use_dw_conv:
            x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        return self.pw_conv(x)
class GhostUnit(nn.Module):
    """
    GhostNet unit: a GhostExpBlock residual path plus a (possibly projected)
    identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 use_kernel3,
                 exp_factor,
                 use_se):
        super(GhostUnit, self).__init__()
        # The shortcut needs a projection whenever channel count or spatial size changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.body = GhostExpBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            use_kernel3=use_kernel3,
            exp_factor=exp_factor,
            use_se=use_se)
        if self.resize_identity:
            self.identity_conv = dwsconv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                pw_activation=None)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.body(x) + identity
class GhostClassifier(nn.Module):
    """
    GhostNet classifier head: a 1x1 expansion block followed by a biased 1x1
    projection onto the class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels):
        super(GhostClassifier, self).__init__()
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class GhostNet(nn.Module):
    """
    GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 exp_factors,
                 use_se,
                 first_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(GhostNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of a stage downsamples; the very first
                # stage downsamples only when `first_stride` is set.
                stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                use_se_flag = use_se[i][j] == 1
                stage.add_module("unit{}".format(j + 1), GhostUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    use_se=use_se_flag))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = GhostClassifier(
            in_channels=in_channels,
            out_channels=num_classes,
            mid_channels=classifier_mid_channels)
        self._init_params()

    def _init_params(self):
        """Kaiming-initialize all convolution weights and zero their biases."""
        # Fix: the original iterated `named_modules()` but never used the name.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        # Flatten the 1x1 classifier output to (batch, num_classes).
        x = x.view(x.size(0), -1)
        return x
def get_ghostnet(width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create GhostNet model with specific parameters.

    Parameters:
    ----------
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Base (width_scale == 1.0) architecture description, one sub-list per stage.
    init_block_channels = 16
    channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]]
    kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
    exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]]
    use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]]
    final_block_channels = 960
    classifier_mid_channels = 1280
    first_stride = False

    if width_scale != 1.0:
        channels = [[round_channels(c * width_scale, divisor=4) for c in stage] for stage in channels]
        init_block_channels = round_channels(init_block_channels * width_scale, divisor=4)
        if width_scale > 1.0:
            # Only widen (never shrink) the final feature block.
            final_block_channels = round_channels(final_block_channels * width_scale, divisor=4)

    net = GhostNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        use_se=use_se,
        first_stride=first_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def ghostnet(**kwargs):
    """
    GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_ghostnet(model_name="ghostnet", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the GhostNet constructor: parameter count and output shape."""
    import torch

    for model in (ghostnet,):
        net = model(pretrained=False)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != ghostnet or weight_count == 5180840)
        y = net(torch.randn(1, 3, 224, 224))
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 12,819
| 30.268293
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/efficientnet.py
|
"""
EfficientNet for ImageNet-1K, implemented in PyTorch.
Original papers:
- 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946,
- 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665.
"""
__all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6',
'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b',
'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b',
'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c',
'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c']
import os
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
def calc_tf_padding(x,
                    kernel_size,
                    stride=1,
                    dilation=1):
    """
    Calculate TF-same like padding size.

    NOTE(review): the returned order is (h-begin, h-end, w-begin, w-end), while
    F.pad's 4-tuple pads the LAST dimension (width) first. For square inputs the
    two orderings coincide; for non-square inputs this looks swapped — confirm.

    Parameters:
    ----------
    x : tensor
        Input tensor.
    kernel_size : int
        Convolution window size.
    stride : int, default 1
        Strides of the convolution.
    dilation : int, default 1
        Dilation value for convolution layer.

    Returns:
    -------
    tuple of 4 int
        The size of the padding.
    """
    height, width = x.size()[2:]
    pads = []
    for size in (height, width):
        out_size = math.ceil(float(size) / stride)
        # Total padding needed so the strided window exactly covers the input.
        total = max((out_size - 1) * stride + (kernel_size - 1) * dilation + 1 - size, 0)
        pads.append(total // 2)
        pads.append(total - total // 2)
    return tuple(pads)
class EffiDwsConvUnit(nn.Module):
    """
    EfficientNet specific depthwise separable convolution block/unit, with
    BatchNorm and activation on each convolution and an SE block in between.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bn_eps,
                 activation,
                 tf_mode):
        super(EffiDwsConvUnit, self).__init__()
        self.tf_mode = tf_mode
        # Skip connection only when both channels and spatial size are preserved.
        self.residual = (in_channels == out_channels) and (stride == 1)

        self.dw_conv = dwconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            padding=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation)
        self.se = SEBlock(
            channels=in_channels,
            reduction=4,
            mid_activation=activation)
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        if self.tf_mode:
            # TF "SAME" padding is computed dynamically from the input size.
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3))
        out = self.pw_conv(self.se(self.dw_conv(x)))
        if self.residual:
            out = out + identity
        return out
class EffiInvResUnit(nn.Module):
    """
    EfficientNet inverted residual unit (MBConv): 1x1 expansion, depthwise
    convolution, optional squeeze-and-excitation, and 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit (0 disables the SE block).
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 exp_factor,
                 se_factor,
                 bn_eps,
                 activation,
                 tf_mode):
        super(EffiInvResUnit, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.tf_mode = tf_mode
        # Skip connection only when both channels and spatial size are preserved.
        self.residual = (in_channels == out_channels) and (stride == 1)
        self.use_se = se_factor > 0

        mid_channels = in_channels * exp_factor
        if kernel_size == 3:
            dw_block = dwconv3x3_block
        elif kernel_size == 5:
            dw_block = dwconv5x5_block
        else:
            dw_block = None
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=activation)
        self.conv2 = dw_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            padding=(0 if tf_mode else (kernel_size // 2)),
            bn_eps=bn_eps,
            activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                mid_activation=activation)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        x = self.conv1(x)
        if self.tf_mode:
            # TF "SAME" padding is computed dynamically from the input size.
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=self.kernel_size, stride=self.stride))
        x = self.conv2(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x)
        if self.residual:
            x = x + identity
        return x
class EffiInitBlock(nn.Module):
    """
    EfficientNet specific initial block: a stride-2 3x3 convolution with
    optional dynamic TF-style "SAME" padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 activation,
                 tf_mode):
        super(EffiInitBlock, self).__init__()
        self.tf_mode = tf_mode
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            padding=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation)

    def forward(self, x):
        if self.tf_mode:
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3, stride=2))
        return self.conv(x)
class EfficientNet(nn.Module):
    """
    EfficientNet model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(EfficientNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        activation = "swish"

        self.features = nn.Sequential()
        self.features.add_module("init_block", EffiInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            tf_mode=tf_mode))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            kernel_sizes_per_stage = kernel_sizes[i]
            expansion_factors_per_stage = expansion_factors[i]
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                kernel_size = kernel_sizes_per_stage[j]
                expansion_factor = expansion_factors_per_stage[j]
                stride = strides_per_stage[i] if (j == 0) else 1
                # First stage uses the lighter depthwise-separable unit;
                # all later stages use inverted residual (MBConv) units.
                if i == 0:
                    stage.add_module("unit{}".format(j + 1), EffiDwsConvUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode))
                else:
                    stage.add_module("unit{}".format(j + 1), EffiInvResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        exp_factor=expansion_factor,
                        se_factor=4,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            activation=activation))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Sequential()
        if dropout_rate > 0.0:
            self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        """Kaiming-initialize all convolution weights and zero their biases."""
        # Fix: the original iterated `named_modules()` but never used the name.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten pooled features to (batch, channels) before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_efficientnet(version,
                     in_size,
                     tf_mode=False,
                     bn_eps=1e-5,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".torch", "models"),
                     **kwargs):
    """
    Create EfficientNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of EfficientNet ('b0'...'b8').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-version scaling: expected input size, depth factor, width factor, dropout rate.
    version_params = {
        "b0": ((224, 224), 1.0, 1.0, 0.2),
        "b1": ((240, 240), 1.1, 1.0, 0.2),
        "b2": ((260, 260), 1.2, 1.1, 0.3),
        "b3": ((300, 300), 1.4, 1.2, 0.3),
        "b4": ((380, 380), 1.8, 1.4, 0.4),
        "b5": ((456, 456), 2.2, 1.6, 0.4),
        "b6": ((528, 528), 2.6, 1.8, 0.5),
        "b7": ((600, 600), 3.1, 2.0, 0.5),
        "b8": ((672, 672), 3.6, 2.2, 0.5),
    }
    if version not in version_params:
        raise ValueError("Unsupported EfficientNet version {}".format(version))
    expected_in_size, depth_factor, width_factor, dropout_rate = version_params[version]
    assert (in_size == expected_in_size)

    # Base (B0) architecture description, one entry per "layer group".
    init_block_channels = 32
    layers = [1, 2, 2, 3, 3, 4, 1]
    downsample = [1, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 40, 80, 112, 192, 320]
    expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6]
    kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3]
    strides_per_stage = [1, 2, 2, 2, 1, 2, 1]
    final_block_channels = 1280

    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]

    def expand_to_stages(per_layer_values):
        # Repeat each per-layer value `layers[i]` times; a layer group whose
        # downsample flag is 0 is merged into the previous stage instead of
        # starting a new one. (Replaces four duplicated reduce-lambdas.)
        stages = []
        for value, count, ds in zip(per_layer_values, layers, downsample):
            if ds != 0:
                stages.append([value] * count)
            else:
                stages[-1] = stages[-1] + [value] * count
        return stages

    channels = expand_to_stages(channels_per_layers)
    kernel_sizes = expand_to_stages(kernel_sizes_per_layers)
    expansion_factors = expand_to_stages(expansion_factors_per_layers)
    strides_per_stage = [si[0] for si in expand_to_stages(strides_per_stage)]

    init_block_channels = round_channels(init_block_channels * width_factor)
    if width_factor > 1.0:
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
        final_block_channels = round_channels(final_block_channels * width_factor)

    net = EfficientNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_eps=bn_eps,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def efficientnet_b0(in_size=(224, 224), **kwargs):
    """
    EfficientNet-B0 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b0", version="b0", in_size=in_size, **kwargs)
def efficientnet_b1(in_size=(240, 240), **kwargs):
    """
    EfficientNet-B1 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b1", version="b1", in_size=in_size, **kwargs)
def efficientnet_b2(in_size=(260, 260), **kwargs):
    """
    EfficientNet-B2 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b2", version="b2", in_size=in_size, **kwargs)
def efficientnet_b3(in_size=(300, 300), **kwargs):
    """
    EfficientNet-B3 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b3", version="b3", in_size=in_size, **kwargs)
def efficientnet_b4(in_size=(380, 380), **kwargs):
    """
    EfficientNet-B4 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b4", version="b4", in_size=in_size, **kwargs)
def efficientnet_b5(in_size=(456, 456), **kwargs):
    """
    EfficientNet-B5 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b5", version="b5", in_size=in_size, **kwargs)
def efficientnet_b6(in_size=(528, 528), **kwargs):
    """
    EfficientNet-B6 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b6", version="b6", in_size=in_size, **kwargs)
def efficientnet_b7(in_size=(600, 600), **kwargs):
    """
    EfficientNet-B7 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b7", version="b7", in_size=in_size, **kwargs)
def efficientnet_b8(in_size=(672, 672), **kwargs):
    """
    EfficientNet-B8 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural
    Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (672, 672)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b8", version="b8", in_size=in_size, **kwargs)
def efficientnet_b0b(in_size=(224, 224), **kwargs):
    """
    EfficientNet-B0-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b0b", version="b0", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b1b(in_size=(240, 240), **kwargs):
    """
    EfficientNet-B1-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b1b", version="b1", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b2b(in_size=(260, 260), **kwargs):
    """
    EfficientNet-B2-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b2b", version="b2", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b3b(in_size=(300, 300), **kwargs):
    """
    EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b3b", version="b3", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b4b(in_size=(380, 380), **kwargs):
    """
    EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b4b", version="b4", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b5b(in_size=(456, 456), **kwargs):
    """
    EfficientNet-B5-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b5b", version="b5", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b6b(in_size=(528, 528), **kwargs):
    """
    EfficientNet-B6-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b6b", version="b6", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b7b(in_size=(600, 600), **kwargs):
    """
    EfficientNet-B7-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for
    Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b7b", version="b7", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b0c(in_size=(224, 224), **kwargs):
    """
    EfficientNet-B0-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet:
    Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing model parameters.
    """
    return get_efficientnet(model_name="efficientnet_b0c", version="b0", in_size=in_size, tf_mode=True,
                            bn_eps=1e-3, **kwargs)
def efficientnet_b1c(in_size=(240, 240), **kwargs):
"""
EfficientNet-B1-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (240, 240)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b1c",
**kwargs)
def efficientnet_b2c(in_size=(260, 260), **kwargs):
"""
EfficientNet-B2-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (260, 260)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b2c",
**kwargs)
def efficientnet_b3c(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b3c",
**kwargs)
def efficientnet_b4c(in_size=(380, 380), **kwargs):
"""
EfficientNet-B4-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (380, 380)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b4c",
**kwargs)
def efficientnet_b5c(in_size=(456, 456), **kwargs):
"""
EfficientNet-B5-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (456, 456)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b5c",
**kwargs)
def efficientnet_b6c(in_size=(528, 528), **kwargs):
"""
EfficientNet-B6-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (528, 528)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b6c",
**kwargs)
def efficientnet_b7c(in_size=(600, 600), **kwargs):
"""
EfficientNet-B7-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (600, 600)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b7c",
**kwargs)
def efficientnet_b8c(in_size=(672, 672), **kwargs):
"""
EfficientNet-B8-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (672, 672)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b8", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b8c",
**kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every EfficientNet variant: parameter count and forward/backward pass."""
    import torch

    pretrained = False

    models = [
        efficientnet_b0,
        efficientnet_b1,
        efficientnet_b2,
        efficientnet_b3,
        efficientnet_b4,
        efficientnet_b5,
        efficientnet_b6,
        efficientnet_b7,
        efficientnet_b8,
        efficientnet_b0b,
        efficientnet_b1b,
        efficientnet_b2b,
        efficientnet_b3b,
        efficientnet_b4b,
        efficientnet_b5b,
        efficientnet_b6b,
        efficientnet_b7b,
        efficientnet_b0c,
        efficientnet_b1c,
        efficientnet_b2c,
        efficientnet_b3c,
        efficientnet_b4c,
        efficientnet_b5c,
        efficientnet_b6c,
        efficientnet_b7c,
        efficientnet_b8c,
    ]

    # Reference trainable-parameter counts; models absent from this map are not checked.
    expected_counts = {
        efficientnet_b0: 5288548,
        efficientnet_b1: 7794184,
        efficientnet_b2: 9109994,
        efficientnet_b3: 12233232,
        efficientnet_b4: 19341616,
        efficientnet_b5: 30389784,
        efficientnet_b6: 43040704,
        efficientnet_b7: 66347960,
        efficientnet_b8: 87413142,
        efficientnet_b0b: 5288548,
        efficientnet_b1b: 7794184,
        efficientnet_b2b: 9109994,
        efficientnet_b3b: 12233232,
        efficientnet_b4b: 19341616,
        efficientnet_b5b: 30389784,
        efficientnet_b6b: 43040704,
        efficientnet_b7b: 66347960,
    }

    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        if model in expected_counts:
            assert (weight_count == expected_counts[model])
        # One forward/backward pass at the model's native resolution.
        x = torch.randn(1, 3, net.in_size[0], net.in_size[1])
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the module's smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
| 37,745
| 35.933464
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/edanet.py
|
"""
EDANet for image segmentation, implemented in PyTorch.
Original paper: 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1809.06323.
"""
__all__ = ['EDANet', 'edanet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1, conv3x3, conv1x1_block, asym_conv3x3_block, NormActivation, InterpolationBlock
class DownBlock(nn.Module):
    """
    EDANet downsampling block for the main branch: a stride-2 3x3 convolution, optionally
    concatenated with a max-pooled copy of the input when the block widens the stream.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(DownBlock, self).__init__()
        self.expand = (in_channels < out_channels)
        # When expanding, the convolution only produces the extra channels; the rest
        # come from the pooled input.
        if self.expand:
            mid_channels = out_channels - in_channels
        else:
            mid_channels = out_channels
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=True,
            stride=2)
        if self.expand:
            self.pool = nn.MaxPool2d(
                kernel_size=2,
                stride=2)
        self.norm_activ = NormActivation(
            in_channels=out_channels,
            bn_eps=bn_eps)
    def forward(self, x):
        out = self.conv(x)
        if self.expand:
            out = torch.cat((out, self.pool(x)), dim=1)
        return self.norm_activ(out)
class EDABlock(nn.Module):
    """
    EDANet base block: two asymmetric 3x3 convolutions (the second dilated), with an
    optional trailing dropout.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_eps):
        super(EDABlock, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        self.conv1 = asym_conv3x3_block(
            channels=channels,
            bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps,
            lw_activation=None)
        self.conv2 = asym_conv3x3_block(
            channels=channels,
            padding=dilation,
            dilation=dilation,
            bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps,
            rw_activation=None)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)
    def forward(self, x):
        y = self.conv2(self.conv1(x))
        if self.use_dropout:
            y = self.dropout(y)
        return y
class EDAUnit(nn.Module):
    """
    EDANet unit: a 1x1 bottleneck plus EDA block whose output is concatenated with the
    unmodified input (dense connectivity), followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation,
                 dropout_rate,
                 bn_eps):
        super(EDAUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        # Only the newly produced features pass through the convolutions.
        mid_channels = out_channels - in_channels
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=True)
        self.conv2 = EDABlock(
            channels=mid_channels,
            dilation=dilation,
            dropout_rate=dropout_rate,
            bn_eps=bn_eps)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        y = self.conv2(self.conv1(x))
        # New features first, then the identity branch.
        return self.activ(torch.cat((y, x), dim=1))
class EDANet(nn.Module):
    """
    EDANet model from 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1809.06323.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for the first unit of each stage.
    dilations : list of list of int
        Dilations for blocks. The first (zero) entry of each stage marks the downsampling unit.
    growth_rate : int
        Growth rate for numbers of output channels for each non-first unit.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result (accepted for API uniformity; only validated here).
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 dilations,
                 growth_rate,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(EDANet, self).__init__()
        assert (aux is not None)
        assert (fixed_size is not None)
        # Spatial size must be divisible by 8: the head upsamples by a fixed factor of 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size
        dropout_rate = 0.02
        self.features = nn.Sequential()
        # The number of stages equals len(dilations); channels[i] seeds stage i's width.
        for i, dilations_per_stage in enumerate(dilations):
            out_channels = channels[i]
            stage = nn.Sequential()
            for j, dilation in enumerate(dilations_per_stage):
                if j == 0:
                    # First unit of each stage downsamples (stride-2 DownBlock).
                    stage.add_module("unit{}".format(j + 1), DownBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bn_eps=bn_eps))
                else:
                    # Dense growth: every non-first unit widens the stream by growth_rate.
                    out_channels += growth_rate
                    stage.add_module("unit{}".format(j + 1), EDAUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        dilation=dilation,
                        dropout_rate=dropout_rate,
                        bn_eps=bn_eps))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 1x1 classifier head, then 8x interpolation back to the input resolution.
        self.head = conv1x1(
            in_channels=in_channels,
            out_channels=num_classes,
            bias=True)
        self.up = InterpolationBlock(
            scale_factor=8,
            align_corners=True)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every convolution; zero bias where present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = self.head(x)
        x = self.up(x)
        return x
def get_edanet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create EDANet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # NOTE(review): `channels` lists four widths but `dilations` defines three stages,
    # so the trailing 450 is never consumed as a stage seed -- verify against the
    # reference configuration.
    net = EDANet(
        channels=[15, 60, 130, 450],
        dilations=[[0], [0, 1, 1, 1, 2, 2], [0, 2, 2, 4, 4, 8, 8, 16, 16]],
        growth_rate=40,
        bn_eps=1e-3,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def edanet_cityscapes(num_classes=19, **kwargs):
    """
    EDANet model for Cityscapes from 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic
    Segmentation,' https://arxiv.org/abs/1809.06323.

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_edanet(
        num_classes=num_classes,
        model_name="edanet_cityscapes",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test EDANet: parameter count and output shape on a Cityscapes-sized input."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    for model in [edanet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != edanet_cityscapes or weight_count == 689485)
        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))
# Run the module's smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
| 10,158
| 28.618076
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/channelnet.py
|
"""
ChannelNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,'
https://arxiv.org/abs/1809.01330.
"""
__all__ = ['ChannelNet', 'channelnet']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
def dwconv3x3(in_channels, out_channels, stride, bias=False):
    """
    3x3 depthwise version of the standard convolution layer (groups == out_channels).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=out_channels,
        bias=bias)
class ChannetConv(nn.Module):
    """
    ChannelNet convolution block: Conv2d -> (optional Dropout) -> BatchNorm -> optional ReLU6.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate; applied between the convolution and batch norm when positive.
    activate : bool, default True
        Whether to append the ReLU6 activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 dropout_rate=0.0,
                 activate=True):
        super(ChannetConv, self).__init__()
        self.use_dropout = (dropout_rate > 0.0)
        self.activate = activate
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        if self.activate:
            self.activ = nn.ReLU6(inplace=True)
    def forward(self, x):
        y = self.conv(x)
        if self.use_dropout:
            y = self.dropout(y)
        y = self.bn(y)
        return self.activ(y) if self.activate else y
def channet_conv1x1(in_channels, out_channels, stride=1, groups=1, bias=False, dropout_rate=0.0, activate=True):
    """
    1x1 version of the ChannelNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return ChannetConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        groups=groups,
        bias=bias,
        dropout_rate=dropout_rate,
        activate=activate)
def channet_conv3x3(in_channels, out_channels, stride, padding=1, dilation=1, groups=1, bias=False,
                    dropout_rate=0.0, activate=True):
    """
    3x3 version of the ChannelNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return ChannetConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        dropout_rate=dropout_rate,
        activate=activate)
class ChannetDwsConvBlock(nn.Module):
    """
    ChannelNet depthwise separable convolution block: a plain 3x3 depthwise convolution
    followed by a 1x1 (possibly grouped) pointwise block carrying the BN and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    dropout_rate : float, default 0.0
        Dropout rate.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 groups=1,
                 dropout_rate=0.0):
        super(ChannetDwsConvBlock, self).__init__()
        self.dw_conv = dwconv3x3(
            in_channels=in_channels,
            out_channels=in_channels,
            stride=stride)
        self.pw_conv = channet_conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=groups,
            dropout_rate=dropout_rate)
    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
class SimpleGroupBlock(nn.Module):
    """
    ChannelNet block: a stack of depthwise separable group convolution layers, each
    preserving the channel count.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 channels,
                 multi_blocks,
                 groups,
                 dropout_rate):
        super(SimpleGroupBlock, self).__init__()
        self.blocks = nn.Sequential()
        for idx in range(multi_blocks):
            self.blocks.add_module("block{}".format(idx + 1), ChannetDwsConvBlock(
                in_channels=channels,
                out_channels=channels,
                stride=1,
                groups=groups,
                dropout_rate=dropout_rate))
    def forward(self, x):
        return self.blocks(x)
class ChannelwiseConv2d(nn.Module):
    """
    ChannelNet channel-wise convolution: the channel axis is treated as the depth of a
    single-channel 3D volume and convolved with a 1x1 spatial / (4*groups)-deep kernel.

    NOTE(review): the final `view` restores the original channel count only when the
    conv output satisfies groups * out_depth == channels (true for groups == 2 with the
    given kernel/stride/padding) -- confirm for other group counts.

    Parameters:
    ----------
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 groups,
                 dropout_rate):
        super(ChannelwiseConv2d, self).__init__()
        self.use_dropout = (dropout_rate > 0.0)
        self.conv = nn.Conv3d(
            in_channels=1,
            out_channels=groups,
            kernel_size=(4 * groups, 1, 1),
            stride=(groups, 1, 1),
            padding=(2 * groups - 1, 0, 0),
            bias=False)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)
    def forward(self, x):
        batch, channels, height, width = x.size()
        # Insert a singleton "3D channel" axis so channels become the depth dimension.
        y = self.conv(x.unsqueeze(dim=1))
        if self.use_dropout:
            y = self.dropout(y)
        return y.view(batch, channels, height, width)
class ConvGroupBlock(nn.Module):
    """
    ChannelNet block combining a channel-wise convolution with a sequence of depthwise
    separable group convolutions.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 channels,
                 multi_blocks,
                 groups,
                 dropout_rate):
        super(ConvGroupBlock, self).__init__()
        self.conv = ChannelwiseConv2d(
            groups=groups,
            dropout_rate=dropout_rate)
        self.block = SimpleGroupBlock(
            channels=channels,
            multi_blocks=multi_blocks,
            groups=groups,
            dropout_rate=dropout_rate)
    def forward(self, x):
        return self.block(self.conv(x))
class ChannetUnit(nn.Module):
    """
    ChannelNet unit: two sub-blocks run sequentially, merged per `merge_type`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : tuple/list of 2 int
        Number of output channels for each sub-block.
    strides : int or tuple/list of 2 int
        Strides of the convolution (applied in the first sub-block only).
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    block_names : tuple/list of 2 str
        Sub-block names.
    merge_type : str
        Type of sub-block output merging: 'seq' (last output only), 'add' (sum of
        sub-block outputs) or 'cat' (channel-wise concatenation).
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 strides,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 block_names,
                 merge_type):
        super(ChannetUnit, self).__init__()
        assert (len(block_names) == 2)
        assert (merge_type in ["seq", "add", "cat"])
        self.merge_type = merge_type
        self.blocks = nn.Sequential()
        for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)):
            # Only the first sub-block may downsample; subsequent ones keep stride 1.
            stride_i = (strides if i == 0 else 1)
            if block_name == "channet_conv3x3":
                self.blocks.add_module("block{}".format(i + 1), channet_conv3x3(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride_i,
                    dropout_rate=dropout_rate,
                    activate=False))
            elif block_name == "channet_dws_conv_block":
                self.blocks.add_module("block{}".format(i + 1), ChannetDwsConvBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride_i,
                    dropout_rate=dropout_rate))
            elif block_name == "simple_group_block":
                # Group blocks are channel-preserving: only in_channels is passed.
                # NOTE(review): assumes out_channels == in_channels here -- confirm configs.
                self.blocks.add_module("block{}".format(i + 1), SimpleGroupBlock(
                    channels=in_channels,
                    multi_blocks=multi_blocks,
                    groups=groups,
                    dropout_rate=dropout_rate))
            elif block_name == "conv_group_block":
                self.blocks.add_module("block{}".format(i + 1), ConvGroupBlock(
                    channels=in_channels,
                    multi_blocks=multi_blocks,
                    groups=groups,
                    dropout_rate=dropout_rate))
            else:
                raise NotImplementedError()
            in_channels = out_channels
    def forward(self, x):
        # Run the sub-blocks sequentially, remembering each intermediate output.
        x_outs = []
        for block in self.blocks._modules.values():
            x = block(x)
            x_outs.append(x)
        if self.merge_type == "add":
            # Sum every earlier sub-block output into the final one.
            for i in range(len(x_outs) - 1):
                x = x + x_outs[i]
        elif self.merge_type == "cat":
            x = torch.cat(tuple(x_outs), dim=1)
        # 'seq' falls through: just the last sub-block's output.
        return x
class ChannelNet(nn.Module):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    channels : list of list of list of int
        Number of output channels for each unit.
    block_names : list of list of list of str
        Names of blocks for each unit.
    merge_types : list of list of str
        Merge types for each unit.
    dropout_rate : float, default 0.0001
        Dropout rate.
    multi_blocks : int, default 2
        Block count architectural parameter.
    groups : int, default 2
        Group count architectural parameter.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 block_names,
                 merge_types,
                 dropout_rate=0.0001,
                 multi_blocks=2,
                 groups=2,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ChannelNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of each stage downsamples.
                strides = 2 if (j == 0) else 1
                stage.add_module("unit{}".format(j + 1), ChannetUnit(
                    in_channels=in_channels,
                    out_channels_list=out_channels,
                    strides=strides,
                    multi_blocks=multi_blocks,
                    groups=groups,
                    dropout_rate=dropout_rate,
                    block_names=block_names[i][j],
                    merge_type=merge_types[i][j]))
                # 'cat' concatenates both sub-block outputs; otherwise the last
                # sub-block's width defines the unit's output channel count.
                if merge_types[i][j] == "cat":
                    in_channels = sum(out_channels)
                else:
                    in_channels = out_channels[-1]
            self.features.add_module("stage{}".format(i + 1), stage)
        # NOTE(review): fixed 7x7 average pooling assumes a 7x7 final feature map
        # (i.e. 224x224 input) -- confirm before using other input sizes.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every convolution; zero bias where present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_channelnet(model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create ChannelNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-unit output widths, sub-block types and merge modes of the architecture.
    channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]]
    block_names = [[["channet_conv3x3", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]]]
    merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]]
    net = ChannelNet(
        channels=channels,
        block_names=block_names,
        merge_types=merge_types,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def channelnet(**kwargs):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_channelnet(
        model_name="channelnet",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test ChannelNet: parameter count and a full forward/backward pass."""
    import torch

    pretrained = False

    for model in [channelnet]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != channelnet or weight_count == 3875112)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the module's smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
| 18,471
| 29.633499
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/pnasnet.py
|
"""
PNASNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
"""
__all__ = ['PNASNet', 'pnasnet5large']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1
from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock
class PnasMaxPoolBlock(nn.Module):
    """
    PNASNet 3x3 max pooling with optional asymmetric extra padding.

    Parameters:
    ----------
    stride : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to zero-pad top/left by one and crop the first output row/column
        after pooling.
    """
    def __init__(self,
                 stride=2,
                 extra_padding=False):
        super(PnasMaxPoolBlock, self).__init__()
        self.extra_padding = extra_padding
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=stride,
            padding=1)
        if self.extra_padding:
            self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0))
    def forward(self, x):
        if not self.extra_padding:
            return self.pool(x)
        # Pad top/left, pool, then drop the first output row and column.
        y = self.pool(self.pad(x))
        return y[:, :, 1:, 1:].contiguous()
def pnas_conv1x1(in_channels, out_channels, stride=1):
    """
    1x1 version of the PNASNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    """
    return NasConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        groups=1)
class DwsBranch(nn.Module):
    """
    PNASNet branch of two stacked depthwise separable convolutions; only the first one
    may downsample or carry the extra padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used as a stem block (sets the intermediate width to
        `out_channels` instead of `in_channels`).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 extra_padding=False,
                 stem=False):
        super(DwsBranch, self).__init__()
        assert (not stem) or (not extra_padding)
        mid_channels = out_channels if stem else in_channels
        padding = kernel_size // 2
        self.conv1 = NasDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            extra_padding=extra_padding)
        self.conv2 = NasDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding)
    def forward(self, x):
        return self.conv2(self.conv1(x))
def dws_branch_k3(in_channels,
out_channels,
stride=2,
extra_padding=False,
stem=False):
"""
3x3 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
stem : bool, default False
Whether to use squeeze reduction if False.
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
extra_padding=extra_padding,
stem=stem)
def dws_branch_k5(in_channels,
                  out_channels,
                  stride=2,
                  extra_padding=False,
                  stem=False):
    """
    Build a 5x5 PNASNet depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used inside a stem unit.
    """
    branch_cfg = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 5,
        "stride": stride,
        "extra_padding": extra_padding,
        "stem": stem,
    }
    return DwsBranch(**branch_cfg)
def dws_branch_k7(in_channels,
                  out_channels,
                  stride=2,
                  extra_padding=False):
    """
    Build a 7x7 PNASNet depthwise separable convolution branch (never used as a stem).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    branch_cfg = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 7,
        "stride": stride,
        "extra_padding": extra_padding,
        "stem": False,
    }
    return DwsBranch(**branch_cfg)
class PnasMaxPathBlock(nn.Module):
    """
    PNASNet specific `max path` auxiliary block: max-pooling followed by a
    1x1 convolution and batch normalization (no activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(PnasMaxPathBlock, self).__init__()
        self.maxpool = PnasMaxPoolBlock()
        # 1x1 conv only adapts the channel count; resolution is set by the pool.
        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels)
        self.bn = nasnet_batch_norm(channels=out_channels)
    def forward(self, x):
        x = self.maxpool(x)
        x = self.conv(x)
        x = self.bn(x)
        return x
class PnasBaseUnit(nn.Module):
    """
    PNASNet base unit. Provides the shared five-branch cell computation used by
    concrete units (which are expected to define the `combN_left`/`combN_right`
    sub-modules).
    """
    def __init__(self):
        super(PnasBaseUnit, self).__init__()
    def cell_forward(self, x, x_prev):
        """
        Run the five comb-branch pairs over the current (`x`) and previous
        (`x_prev`) feature maps and concatenate the five results along the
        channel dimension.
        """
        assert (hasattr(self, 'comb0_left'))
        x_left = x_prev
        x_right = x
        x0 = self.comb0_left(x_left) + self.comb0_right(x_left)
        x1 = self.comb1_left(x_right) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_right) + self.comb2_right(x_right)
        # Branch 3 feeds on branch 2's output rather than a raw input.
        x3 = self.comb3_left(x2) + self.comb3_right(x_right)
        # comb4_right may be None (non-reduction units); then the raw input is used.
        x4 = self.comb4_left(x_left) + (self.comb4_right(x_right) if self.comb4_right else x_right)
        x_out = torch.cat((x0, x1, x2, x3, x4), dim=1)
        return x_out
class Stem1Unit(PnasBaseUnit):
    """
    PNASNet Stem1 unit. Five branch pairs are concatenated, so each branch
    produces `out_channels // 5` channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (must be divisible by 5).
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(Stem1Unit, self).__init__()
        # Each of the 5 concatenated branches contributes an equal share.
        mid_channels = out_channels // 5
        self.conv_1x1 = pnas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.comb0_left = dws_branch_k5(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True)
        self.comb0_right = PnasMaxPathBlock(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.comb1_left = dws_branch_k7(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.comb1_right = PnasMaxPoolBlock()
        self.comb2_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.comb2_right = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.comb3_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=1)
        self.comb3_right = PnasMaxPoolBlock()
        self.comb4_left = dws_branch_k3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True)
        self.comb4_right = pnas_conv1x1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=2)
    def forward(self, x):
        # The raw input plays the role of the "previous" feature map here.
        x_prev = x
        x = self.conv_1x1(x)
        x_out = self.cell_forward(x, x_prev)
        return x_out
class PnasUnit(PnasBaseUnit):
    """
    PNASNet ordinary unit. Five branch pairs are concatenated, so each branch
    produces `out_channels // 5` channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels (must be divisible by 5).
    reduction : bool, default False
        Whether to use reduction (stride-2 branches).
    extra_padding : bool, default False
        Whether to use extra padding.
    match_prev_layer_dimensions : bool, default False
        Whether to match previous layer dimensions (via NasPathBlock).
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 reduction=False,
                 extra_padding=False,
                 match_prev_layer_dimensions=False):
        super(PnasUnit, self).__init__()
        # Each of the 5 concatenated branches contributes an equal share.
        mid_channels = out_channels // 5
        stride = 2 if reduction else 1
        # The previous feature map may come from a different resolution;
        # NasPathBlock adapts it, otherwise a plain 1x1 conv suffices.
        if match_prev_layer_dimensions:
            self.conv_prev_1x1 = NasPathBlock(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
        else:
            self.conv_prev_1x1 = pnas_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
        self.conv_1x1 = pnas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.comb0_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb0_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)
        self.comb1_left = dws_branch_k7(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb1_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)
        self.comb2_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb2_right = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb3_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=1)
        self.comb3_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)
        self.comb4_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        # Only reduction units need to downsample the raw right input; otherwise
        # cell_forward uses it unchanged (comb4_right is None).
        if reduction:
            self.comb4_right = pnas_conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride)
        else:
            self.comb4_right = None
    def forward(self, x, x_prev):
        x_prev = self.conv_prev_1x1(x_prev)
        x = self.conv_1x1(x)
        x_out = self.cell_forward(x, x_prev)
        return x_out
class PNASNet(nn.Module):
    """
    PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem1_blocks_channels : int
        Number of output channels for the Stem1 unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 stem1_blocks_channels,
                 in_channels=3,
                 in_size=(331, 331),
                 num_classes=1000):
        super(PNASNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Dual-path container: units receive both the current and the previous
        # feature map, except for the first/last ordinal blocks.
        self.features = nasnet_dual_path_sequential(
            return_two=False,
            first_ordinals=2,
            last_ordinals=2)
        self.features.add_module("init_block", NASNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        self.features.add_module("stem1_unit", Stem1Unit(
            in_channels=in_channels,
            out_channels=stem1_blocks_channels))
        prev_in_channels = in_channels
        in_channels = stem1_blocks_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nasnet_dual_path_sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of each stage downsamples.
                reduction = (j == 0)
                extra_padding = (j == 0) and (i not in [0, 2])
                match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0))
                stage.add_module("unit{}".format(j + 1), PnasUnit(
                    in_channels=in_channels,
                    prev_in_channels=prev_in_channels,
                    out_channels=out_channels,
                    reduction=reduction,
                    extra_padding=extra_padding,
                    match_prev_layer_dimensions=match_prev_layer_dimensions))
                prev_in_channels = in_channels
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("activ", nn.ReLU())
        # 11x11 pooling matches the 331x331 input resolution.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=11,
            stride=1))
        self.output = nn.Sequential()
        self.output.add_module("dropout", nn.Dropout(p=0.5))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_pnasnet(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create PNASNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    PNASNet
        The constructed network.
    """
    repeat = 4
    init_block_channels = 96
    stem_blocks_channels = [270, 540]
    norm_channels = [1080, 2160, 4320]
    channels = [[ci] * repeat for ci in norm_channels]
    stem1_blocks_channels = stem_blocks_channels[0]
    # The Stem2 unit is folded in as the first unit of the first stage.
    channels[0] = [stem_blocks_channels[1]] + channels[0]
    net = PNASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        stem1_blocks_channels=stem1_blocks_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def pnasnet5large(**kwargs):
    """
    Build the PNASNet-5-Large model from 'Progressive Neural Architecture Search,'
    https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    model_name = "pnasnet5large"
    return get_pnasnet(model_name=model_name, **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : torch.nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable weights.
    """
    import numpy as np
    # Cast each product to a plain Python int so the result is an int,
    # not a numpy scalar leaking out of the helper.
    return sum(int(np.prod(param.size())) for param in net.parameters() if param.requires_grad)
def _test():
    """Smoke-test the PNASNet constructor: parameter count and output shape."""
    import torch

    pretrained = False
    expected_widths = {pnasnet5large: 86057668}

    for model in expected_widths:
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_widths[model]

        x = torch.randn(1, 3, 331, 331)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
_test()
| 18,176
| 28.945634
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/efficientnetedge.py
|
"""
EfficientNet-Edge for ImageNet-1K, implemented in PyTorch.
Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
"""
__all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b']
import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock
from .efficientnet import EffiInvResUnit, EffiInitBlock
class EffiEdgeResUnit(nn.Module):
    """
    EfficientNet-Edge edge residual unit: expanding 3x3 conv, optional SE block,
    projecting 1x1 conv, and an optional identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit (0 disables SE).
    mid_from_in : bool
        Whether to use input channel count for middle channel count calculation.
    use_skip : bool
        Whether to use skip connection.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 exp_factor,
                 se_factor,
                 mid_from_in,
                 use_skip,
                 bn_eps,
                 activation):
        super(EffiEdgeResUnit, self).__init__()
        # The shortcut requires matching shapes and must be enabled explicitly.
        self.residual = (in_channels == out_channels) and (stride == 1) and use_skip
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                mid_activation=activation)
        # Projection conv carries the stride and has no activation.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            stride=stride,
            bn_eps=bn_eps,
            activation=None)
    def forward(self, x):
        if self.residual:
            identity = x
        x = self.conv1(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class EfficientNetEdge(nn.Module):
    """
    EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(EfficientNetEdge, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Edge variant replaces swish with plain ReLU throughout.
        activation = "relu"
        self.features = nn.Sequential()
        self.features.add_module("init_block", EffiInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            tf_mode=tf_mode))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            kernel_sizes_per_stage = kernel_sizes[i]
            expansion_factors_per_stage = expansion_factors[i]
            # First stage: mid channels derive from the output width and the
            # residual shortcut is disabled.
            mid_from_in = (i != 0)
            use_skip = (i != 0)
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                kernel_size = kernel_sizes_per_stage[j]
                expansion_factor = expansion_factors_per_stage[j]
                stride = strides_per_stage[i] if (j == 0) else 1
                # Early stages use edge residual units; later ones use
                # inverted residual (MBConv-style) units.
                if i < 3:
                    stage.add_module("unit{}".format(j + 1), EffiEdgeResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride,
                        exp_factor=expansion_factor,
                        se_factor=0,
                        mid_from_in=mid_from_in,
                        use_skip=use_skip,
                        bn_eps=bn_eps,
                        activation=activation))
                else:
                    stage.add_module("unit{}".format(j + 1), EffiInvResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        exp_factor=expansion_factor,
                        se_factor=0,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            activation=activation))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))
        self.output = nn.Sequential()
        if dropout_rate > 0.0:
            self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_efficientnet_edge(version,
                          in_size,
                          tf_mode=False,
                          bn_eps=1e-5,
                          model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".torch", "models"),
                          **kwargs):
    """
    Create EfficientNet-Edge model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of EfficientNet ('small', 'medium', 'large').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    EfficientNetEdge
        The constructed network.
    """
    dropout_rate = 0.0
    if version == "small":
        assert (in_size == (224, 224))
        depth_factor = 1.0
        width_factor = 1.0
        # dropout_rate = 0.2
    elif version == "medium":
        assert (in_size == (240, 240))
        depth_factor = 1.1
        width_factor = 1.0
        # dropout_rate = 0.2
    elif version == "large":
        assert (in_size == (300, 300))
        depth_factor = 1.4
        width_factor = 1.2
        # dropout_rate = 0.3
    else:
        raise ValueError("Unsupported EfficientNet-Edge version {}".format(version))
    init_block_channels = 32
    layers = [1, 2, 4, 5, 4, 2]
    downsample = [1, 1, 1, 1, 0, 1]
    channels_per_layers = [24, 32, 48, 96, 144, 192]
    expansion_factors_per_layers = [4, 8, 8, 8, 8, 8]
    kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5]
    strides_per_stage = [1, 2, 2, 2, 1, 2]
    final_block_channels = 1280

    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]

    def _expand_stages(values):
        # Group a per-layer value list into per-stage sub-lists: a non-zero
        # downsample flag opens a new stage, a zero flag merges the layers
        # into the previous stage. (Replaces four identical reduce lambdas.)
        stages = []
        for value, layer_count, new_stage in zip(values, layers, downsample):
            if new_stage != 0:
                stages.append([value] * layer_count)
            else:
                stages[-1] = stages[-1] + [value] * layer_count
        return stages

    channels = _expand_stages(channels_per_layers)
    kernel_sizes = _expand_stages(kernel_sizes_per_layers)
    expansion_factors = _expand_stages(expansion_factors_per_layers)
    # Only the first stride of each stage matters (the rest are 1).
    strides_per_stage = [si[0] for si in _expand_stages(strides_per_stage)]

    init_block_channels = round_channels(init_block_channels * width_factor)
    if width_factor > 1.0:
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
        final_block_channels = round_channels(final_block_channels * width_factor)
    net = EfficientNetEdge(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_eps=bn_eps,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def efficientnet_edge_small_b(in_size=(224, 224), **kwargs):
    """
    Build the EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="small",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_small_b",
        **kwargs)
def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs):
    """
    Build the EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="medium",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_medium_b",
        **kwargs)
def efficientnet_edge_large_b(in_size=(300, 300), **kwargs):
    """
    Build the EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="large",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_large_b",
        **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : torch.nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable weights.
    """
    import numpy as np
    # Cast each product to a plain Python int so the result is an int,
    # not a numpy scalar leaking out of the helper.
    return sum(int(np.prod(param.size())) for param in net.parameters() if param.requires_grad)
def _test():
    """Smoke-test EfficientNet-Edge constructors: parameter count and output shape."""
    import torch

    pretrained = False
    expected_widths = {
        efficientnet_edge_small_b: 5438392,
        efficientnet_edge_medium_b: 6899496,
        efficientnet_edge_large_b: 10589712,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        x = torch.randn(1, 3, net.in_size[0], net.in_size[1])
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
_test()
| 14,866
| 35.799505
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/ibnresnext.py
|
"""
IBN-ResNeXt for ImageNet-1K, implemented in PyTorch.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['IBNResNeXt', 'ibn_resnext50_32x4d', 'ibn_resnext101_32x4d', 'ibn_resnext101_64x4d']
import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
from .ibnresnet import ibn_conv1x1_block
class IBNResNeXtBottleneck(nn.Module):
    """
    IBN-ResNeXt bottleneck block for residual path in IBN-ResNeXt unit:
    1x1 reduce (optionally with IBN), grouped 3x3, 1x1 expand.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn):
        super(IBNResNeXtBottleneck, self).__init__()
        mid_channels = out_channels // 4
        # Standard ResNeXt width scaling: per-group width D, total grouped width.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        self.conv1 = ibn_conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width,
            use_ibn=conv1_ibn)
        # Grouped conv carries the stride.
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            stride=stride,
            groups=cardinality)
        # Final projection without activation (applied after the residual add).
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class IBNResNeXtUnit(nn.Module):
    """
    IBN-ResNeXt unit: bottleneck body plus identity (or projected) shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn):
        super(IBNResNeXtUnit, self).__init__()
        # A 1x1 projection is needed whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = IBNResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            conv1_ibn=conv1_ibn)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x
class IBNResNeXt(nn.Module):
    """
    IBN-ResNeXt model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(IBNResNeXt, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage but the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                # IBN is applied everywhere except the last (2048-channel) stage.
                conv1_ibn = (out_channels < 2048)
                stage.add_module("unit{}".format(j + 1), IBNResNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    conv1_ibn=conv1_ibn))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 7x7 pooling matches the 224x224 input resolution.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_ibnresnext(blocks,
                   cardinality,
                   bottleneck_width,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create IBN-ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    layers_per_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_per_blocks:
        raise ValueError("Unsupported IBN-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_per_blocks[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = IBNResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def ibn_resnext50_32x4d(**kwargs):
    """
    Build the IBN-ResNeXt-50 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities
    via IBN-Net,' https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="ibn_resnext50_32x4d",
        **kwargs)
def ibn_resnext101_32x4d(**kwargs):
    """
    Build the IBN-ResNeXt-101 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities
    via IBN-Net,' https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="ibn_resnext101_32x4d",
        **kwargs)
def ibn_resnext101_64x4d(**kwargs):
    """
    Build the IBN-ResNeXt-101 (64x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities
    via IBN-Net,' https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="ibn_resnext101_64x4d",
        **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : torch.nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable weights.
    """
    import numpy as np
    # Cast each product to a plain Python int so the result is an int,
    # not a numpy scalar leaking out of the helper.
    return sum(int(np.prod(param.size())) for param in net.parameters() if param.requires_grad)
def _test():
    """Smoke-test IBN-ResNeXt constructors: parameter count and output shape."""
    import torch

    pretrained = False
    expected_widths = {
        ibn_resnext50_32x4d: 25028904,
        ibn_resnext101_32x4d: 44177704,
        ibn_resnext101_64x4d: 83455272,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
_test()
| 10,749
| 30.341108
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/squeezenext.py
|
"""
SqueezeNext for ImageNet-1K, implemented in PyTorch.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import ConvBlock, conv1x1_block, conv7x7_block
class SqnxtUnit(nn.Module):
    """
    SqueezeNext unit: 1x1 reduce, 1x1 reduce, separable (1x3 + 3x1) pair,
    1x1 expand, plus an identity (or projected) shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(SqnxtUnit, self).__init__()
        # reduction_den controls how aggressively the first 1x1 squeezes:
        # stride-2 units do not squeeze (den=1) but project the identity;
        # channel-shrinking units squeeze by 4 with a projected identity;
        # everything else squeezes by 2 with a plain identity.
        if stride == 2:
            reduction_den = 1
            self.resize_identity = True
        elif in_channels > out_channels:
            reduction_den = 4
            self.resize_identity = True
        else:
            reduction_den = 2
            self.resize_identity = False
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=(in_channels // reduction_den),
            stride=stride,
            bias=True)
        self.conv2 = conv1x1_block(
            in_channels=(in_channels // reduction_den),
            out_channels=(in_channels // (2 * reduction_den)),
            bias=True)
        # Separable 3x3 implemented as 1x3 followed by 3x1.
        self.conv3 = ConvBlock(
            in_channels=(in_channels // (2 * reduction_den)),
            out_channels=(in_channels // reduction_den),
            kernel_size=(1, 3),
            stride=1,
            padding=(0, 1),
            bias=True)
        self.conv4 = ConvBlock(
            in_channels=(in_channels // reduction_den),
            out_channels=(in_channels // reduction_den),
            kernel_size=(3, 1),
            stride=1,
            padding=(1, 0),
            bias=True)
        self.conv5 = conv1x1_block(
            in_channels=(in_channels // reduction_den),
            out_channels=out_channels,
            bias=True)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=True)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x + identity
        x = self.activ(x)
        return x
class SqnxtInitBlock(nn.Module):
    """
    SqueezeNext specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(SqnxtInitBlock, self).__init__()
        # Stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max-pooling.
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            padding=1,
            bias=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            ceil_mode=True)

    def forward(self, x):
        return self.pool(self.conv(x))
class SqueezeNext(nn.Module):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SqueezeNext, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", SqnxtInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), SqnxtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=unit_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bias=True))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]

    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))

    channels = [[c] * n for (c, n) in zip(channels_per_layers, layers)]

    if width_scale != 1:
        # Uniformly scale every stage plus the stem and final block.
        channels = [[int(c * width_scale) for c in stage] for stage in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)

    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs)
    return net
def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs)
    return net
def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs)
    return net
def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs)
    return net
def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs)
    return net
def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs)
    return net
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    trainable = (p for p in net.parameters() if p.requires_grad)
    return sum(np.prod(p.size()) for p in trainable)
def _test():
    """Smoke-test all SqueezeNext variants: parameter counts and output shape."""
    import torch

    pretrained = False

    models = [
        sqnxt23_w1,
        sqnxt23_w3d2,
        sqnxt23_w2,
        sqnxt23v5_w1,
        sqnxt23v5_w3d2,
        sqnxt23v5_w2,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # Use inference mode, consistently with the sibling model test
        # harnesses in this package (the previous net.train() here ran the
        # forward pass with training-mode side effects for no benefit).
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sqnxt23_w1 or weight_count == 724056)
        assert (model != sqnxt23_w3d2 or weight_count == 1511824)
        assert (model != sqnxt23_w2 or weight_count == 2583752)
        assert (model != sqnxt23v5_w1 or weight_count == 921816)
        assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)
        assert (model != sqnxt23v5_w2 or weight_count == 3366344)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 12,238
| 30.543814
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/xdensenet.py
|
"""
X-DenseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block',
'XDenseUnit']
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class XConv2d(nn.Conv2d):
    """
    X-Convolution layer: a Conv2d whose weight is element-wise multiplied by a
    fixed random binary mask, so each output channel only connects to a random
    subset of its input channels (expander-graph sparsity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    groups : int, default 1
        Number of groups.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 groups=1,
                 expand_ratio=2,
                 **kwargs):
        super(XConv2d, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            groups=groups,
            **kwargs)
        self.expand_ratio = expand_ratio
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        grouped_in_channels = in_channels // groups
        # Non-trainable parameter holding the binary connectivity mask; it has
        # the same shape as the convolution weight and is filled by
        # init_parameters() below.
        self.mask = torch.nn.Parameter(
            data=torch.Tensor(out_channels, grouped_in_channels, *kernel_size),
            requires_grad=False)
        self.init_parameters()
    def init_parameters(self):
        """(Re)sample the binary mask: each output channel keeps a random
        subset of ``in_channels / expand_ratio`` input connections (at least
        one), shared across all kernel positions."""
        shape = self.mask.shape
        expand_size = max(shape[1] // self.expand_ratio, 1)
        self.mask[:] = 0
        for i in range(shape[0]):
            # Randomly pick which input channels stay connected to output i.
            jj = torch.randperm(shape[1], device=self.mask.device)[:expand_size]
            self.mask[i, jj, :, :] = 1
    def forward(self, input):
        # Apply the mask at every forward pass so masked weights contribute
        # nothing (and receive zero gradient through the multiplication).
        masked_weight = self.weight.mul(self.mask)
        return F.conv2d(
            input=input,
            weight=masked_weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)
class PreXConvBlock(nn.Module):
    """
    X-convolution block with batch normalization and ReLU pre-activation
    (BN -> ReLU -> XConv2d).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True,
                 expand_ratio=2):
        super(PreXConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate

        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = XConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            expand_ratio=expand_ratio)

    def forward(self, x):
        y = self.bn(x)
        if self.activate:
            y = self.activ(y)
        # Keep the pre-activated tensor around in case the caller wants it.
        pre_activ = y
        y = self.conv(y)
        if self.return_preact:
            return y, pre_activ
        return y
def pre_xconv1x1_block(in_channels,
                       out_channels,
                       stride=1,
                       bias=False,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    1x1 version of the pre-activated x-convolution block (pointwise, no padding).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    block = PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
    return block
def pre_xconv3x3_block(in_channels,
                       out_channels,
                       stride=1,
                       padding=1,
                       dilation=1,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    3x3 version of the pre-activated x-convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    block = PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
    return block
class XDenseUnit(nn.Module):
    """
    X-DenseNet unit: bottleneck 1x1 + 3x3 x-convolutions whose output is
    concatenated with the input (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 expand_ratio):
        super(XDenseUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        bottleneck_factor = 4
        # Number of new feature maps this unit contributes.
        growth_channels = out_channels - in_channels
        mid_channels = growth_channels * bottleneck_factor

        self.conv1 = pre_xconv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            expand_ratio=expand_ratio)
        self.conv2 = pre_xconv3x3_block(
            in_channels=mid_channels,
            out_channels=growth_channels,
            expand_ratio=expand_ratio)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        y = self.conv1(x)
        y = self.conv2(y)
        if self.use_dropout:
            y = self.dropout(y)
        # Dense connection: stack the new maps onto the incoming ones.
        return torch.cat((x, y), dim=1)
class XDenseNet(nn.Module):
    """
    X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 expand_ratio=2,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(XDenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            # Every stage but the first starts with a halving transition.
            if stage_idx != 0:
                stage.add_module("trans{}".format(stage_idx + 1), TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2)))
                in_channels = in_channels // 2
            for unit_idx, out_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(unit_idx + 1), XDenseUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    expand_ratio=expand_ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_xdensenet(blocks,
                  expand_ratio=2,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create X-DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    expand_ratio : int, default 2
        Ratio of expansion.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks))

    # Per-stage output widths: each stage starts from half of the previous
    # stage's final width (the transition block halves it) and every unit
    # then adds `growth_rate` channels.
    channels = []
    prev_channels = init_block_channels * 2
    for num_units in layers:
        stage_channels = []
        current = prev_channels // 2
        for _ in range(num_units):
            current += growth_rate
            stage_channels.append(current)
        channels.append(stage_channels)
        prev_channels = current

    net = XDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        expand_ratio=expand_ratio,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def xdensenet121_2(**kwargs):
    """
    X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_xdensenet(blocks=121, model_name="xdensenet121_2", **kwargs)
    return net
def xdensenet161_2(**kwargs):
    """
    X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_xdensenet(blocks=161, model_name="xdensenet161_2", **kwargs)
    return net
def xdensenet169_2(**kwargs):
    """
    X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_xdensenet(blocks=169, model_name="xdensenet169_2", **kwargs)
    return net
def xdensenet201_2(**kwargs):
    """
    X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_xdensenet(blocks=201, model_name="xdensenet201_2", **kwargs)
    return net
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    trainable = (p for p in net.parameters() if p.requires_grad)
    return sum(np.prod(p.size()) for p in trainable)
def _test():
    """Smoke-test the X-DenseNet variants: parameter counts and output shape."""
    import torch

    pretrained = False

    models = [
        xdensenet121_2,
        xdensenet161_2,
        xdensenet169_2,
        xdensenet201_2,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # Run the check in inference mode.
        net.eval()
        num_params = _calc_width(net)
        print("m={}, {}".format(model.__name__, num_params))
        assert (model != xdensenet121_2 or num_params == 7978856)
        assert (model != xdensenet161_2 or num_params == 28681000)
        assert (model != xdensenet169_2 or num_params == 14149480)
        assert (model != xdensenet201_2 or num_params == 20013928)

        batch = torch.randn(1, 3, 224, 224)
        out = net(batch)
        out.sum().backward()
        assert (tuple(out.size()) == (1, 1000))
| 16,251
| 30.015267
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/linknet.py
|
"""
LinkNet for image segmentation, implemented in PyTorch.
Original paper: 'LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation,'
https://arxiv.org/abs/1707.03718.
"""
__all__ = ['LinkNet', 'linknet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, deconv3x3_block, Hourglass, Identity
from .resnet import resnet18
class DecoderStage(nn.Module):
    """
    LinkNet specific decoder stage: 1x1 channel reduction, 3x3 deconvolution
    (upsampling), then 1x1 channel expansion.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the deconvolution.
    output_padding : int or tuple/list of 2 int
        Output padding value for deconvolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 output_padding,
                 bias):
        super(DecoderStage, self).__init__()
        # Bottleneck: work at a quarter of the input width.
        mid_channels = in_channels // 4
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=bias)
        # Transposed convolution performs the spatial upsampling (when stride > 1).
        self.conv2 = deconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            out_padding=output_padding,
            bias=bias)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=bias)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class LinkNetHead(nn.Module):
    """
    LinkNet head block: two upsampling steps (deconv + final transposed conv)
    with a 3x3 convolution in between, producing per-class score maps.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(LinkNetHead, self).__init__()
        mid_channels = in_channels // 2

        self.conv1 = deconv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2,
            padding=1,
            out_padding=1,
            bias=True)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            bias=True)
        # Final 2x upsampling straight to the class score maps.
        self.conv3 = nn.ConvTranspose2d(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=2,
            stride=2,
            padding=0)

    def forward(self, x):
        y = self.conv1(x)
        y = self.conv2(y)
        return self.conv3(y)
class LinkNet(nn.Module):
    """
    LinkNet model from 'LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation,'
    https://arxiv.org/abs/1707.03718.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor; its `init_block` and `stage1`..`stage4` members are used.
    backbone_out_channels : int
        Number of output channels form feature extractor.
    channels : list of int
        Number of output channels for each decoder stage.
    strides : list of int
        Strides of the deconvolution in each decoder stage.
    output_paddings : list of int
        Output padding value of the deconvolution in each decoder stage.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 strides,
                 output_paddings,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(LinkNet, self).__init__()
        assert (in_channels == 3)
        assert (aux is not None)
        assert (fixed_size is not None)
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size
        bias = False
        # Reuse the backbone's stem; encoder stages feed the hourglass below.
        self.stem = backbone.init_block
        in_channels = backbone_out_channels
        down_seq = nn.Sequential()
        down_seq.add_module("down1", backbone.stage1)
        down_seq.add_module("down2", backbone.stage2)
        down_seq.add_module("down3", backbone.stage3)
        down_seq.add_module("down4", backbone.stage4)
        up_seq = nn.Sequential()
        skip_seq = nn.Sequential()
        # Decoder stages mirror the encoder; skip connections are identity
        # (encoder features are added/merged inside the Hourglass).
        for i, out_channels in enumerate(channels):
            up_seq.add_module("up{}".format(i + 1), DecoderStage(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=strides[i],
                output_padding=output_paddings[i],
                bias=bias))
            in_channels = out_channels
            skip_seq.add_module("skip{}".format(i + 1), Identity())
        # NOTE(review): decoder order is reversed here, presumably so the
        # Hourglass applies the deepest decoder stage first — confirm against
        # common.Hourglass.
        up_seq = up_seq[::-1]
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq)
        self.head = LinkNetHead(
            in_channels=in_channels,
            out_channels=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.stem(x)
        x = self.hg(x)
        x = self.head(x)
        return x
def get_linknet(backbone,
                backbone_out_channels,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create LinkNet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels form feature extractor.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed decoder configuration: four stages, the last one keeps resolution.
    decoder_channels = [256, 128, 64, 64]
    decoder_strides = [2, 2, 2, 1]
    decoder_output_paddings = [1, 1, 1, 0]

    net = LinkNet(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        strides=decoder_strides,
        output_paddings=decoder_output_paddings,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def linknet_cityscapes(pretrained_backbone=False, num_classes=19, **kwargs):
    """
    LinkNet model for Cityscapes from 'LinkNet: Exploiting Encoder Representations for Efficient Semantic
    Segmentation,' https://arxiv.org/abs/1707.03718.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # ResNet-18 encoder without its final pooling block.
    backbone = resnet18(pretrained=pretrained_backbone).features
    del backbone[-1]
    return get_linknet(backbone=backbone, backbone_out_channels=512, num_classes=num_classes,
                       model_name="linknet_cityscapes", **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    trainable = (p for p in net.parameters() if p.requires_grad)
    return sum(np.prod(p.size()) for p in trainable)
def _test():
    """Smoke-test LinkNet for Cityscapes: parameter count and output shape."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    models = [
        linknet_cityscapes,
    ]

    for model in models:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)

        # Run the check in inference mode.
        net.eval()
        num_params = _calc_width(net)
        print("m={}, {}".format(model.__name__, num_params))
        assert (model != linknet_cityscapes or num_params == 11535699)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # y.sum().backward()
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))
| 9,565
| 29.5623
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/diaresnet_cifar.py
|
"""
DIA-ResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAResNet', 'diaresnet20_cifar10', 'diaresnet20_cifar100', 'diaresnet20_svhn', 'diaresnet56_cifar10',
'diaresnet56_cifar100', 'diaresnet56_svhn', 'diaresnet110_cifar10', 'diaresnet110_cifar100',
'diaresnet110_svhn', 'diaresnet164bn_cifar10', 'diaresnet164bn_cifar100', 'diaresnet164bn_svhn',
'diaresnet1001_cifar10', 'diaresnet1001_cifar100', 'diaresnet1001_svhn', 'diaresnet1202_cifar10',
'diaresnet1202_cifar100', 'diaresnet1202_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block, DualPathSequential
from .diaresnet import DIAAttention, DIAResUnit
class CIFARDIAResNet(nn.Module):
    """
    DIA-ResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARDIAResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # DualPathSequential threads the attention's hidden state through
            # the stage's units; return_two=False drops it at the stage output.
            stage = DualPathSequential(return_two=False)
            # One DIA attention module is created per stage and shared by
            # every unit in that stage.
            attention = DIAAttention(
                in_x_features=channels_per_stage[0],
                in_h_features=channels_per_stage[0])
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), DIAResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    attention=attention))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_diaresnet_cifar(num_classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        root=os.path.join("~", ".torch", "models"),
                        **kwargs):
    """
    Create DIA-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])

    # Three stages of equal depth; a bottleneck unit holds 3 convs, a simple
    # unit holds 2, hence divisors 9 and 6 for the non-stem layers.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    layers = [(blocks - 2) // divisor] * 3

    init_block_channels = 16
    expansion = 4 if bottleneck else 1
    channels = [[ci * expansion] * li
                for (ci, li) in zip([16, 32, 64], layers)]

    net = CIFARDIAResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def diaresnet20_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_cifar10",
        **kwargs)
def diaresnet20_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_cifar100",
        **kwargs)
def diaresnet20_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_svhn",
        **kwargs)
def diaresnet56_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_cifar10",
        **kwargs)
def diaresnet56_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_cifar100",
        **kwargs)
def diaresnet56_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_svhn",
        **kwargs)
def diaresnet110_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_cifar10",
        **kwargs)
def diaresnet110_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_cifar100",
        **kwargs)
def diaresnet110_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_svhn",
        **kwargs)
def diaresnet164bn_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_cifar10",
        **kwargs)
def diaresnet164bn_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_cifar100",
        **kwargs)
def diaresnet164bn_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_svhn",
        **kwargs)
def diaresnet1001_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_cifar10",
        **kwargs)
def diaresnet1001_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_cifar100",
        **kwargs)
def diaresnet1001_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_svhn",
        **kwargs)
def diaresnet1202_cifar10(num_classes=10, **kwargs):
    """
    DIA-ResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_cifar10",
        **kwargs)
def diaresnet1202_cifar100(num_classes=100, **kwargs):
    """
    DIA-ResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_cifar100",
        **kwargs)
def diaresnet1202_svhn(num_classes=10, **kwargs):
    """
    DIA-ResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every model variant: parameter count and output shape."""
    import torch

    pretrained = False

    # (factory, number of classes, expected trainable-parameter count)
    configs = [
        (diaresnet20_cifar10, 10, 286866),
        (diaresnet20_cifar100, 100, 292716),
        (diaresnet20_svhn, 10, 286866),
        (diaresnet56_cifar10, 10, 870162),
        (diaresnet56_cifar100, 100, 876012),
        (diaresnet56_svhn, 10, 870162),
        (diaresnet110_cifar10, 10, 1745106),
        (diaresnet110_cifar100, 100, 1750956),
        (diaresnet110_svhn, 10, 1745106),
        (diaresnet164bn_cifar10, 10, 1923002),
        (diaresnet164bn_cifar100, 100, 1946132),
        (diaresnet164bn_svhn, 10, 1923002),
        (diaresnet1001_cifar10, 10, 10547450),
        (diaresnet1001_cifar100, 100, 10570580),
        (diaresnet1001_svhn, 10, 10547450),
        (diaresnet1202_cifar10, 10, 19438418),
        (diaresnet1202_cifar100, 100, 19444268),
        (diaresnet1202_svhn, 10, 19438418),
    ]

    for model, num_classes, expected_count in configs:
        net = model(pretrained=pretrained)

        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))


if __name__ == "__main__":
    _test()
| 19,959
| 35.489945
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resdropresnet_cifar.py
|
"""
ResDrop-ResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
"""
__all__ = ['CIFARResDropResNet', 'resdropresnet20_cifar10', 'resdropresnet20_cifar100', 'resdropresnet20_svhn']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ResDropResUnit(nn.Module):
    """
    ResDrop-ResNet unit with residual connection (stochastic depth).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 life_prob):
        super(ResDropResUnit, self).__init__()
        self.life_prob = life_prob
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        body_class = ResBottleneck if bottleneck else ResBlock
        self.body = body_class(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        x = self.body(x)
        if self.training:
            # Bernoulli gate with inverted scaling: the expectation of the
            # residual branch matches its deterministic value at eval time.
            gate = torch.bernoulli(torch.full((1,), self.life_prob, dtype=x.dtype, device=x.device))
            x = float(gate) / self.life_prob * x
        x = self.activ(x + identity)
        return x
class CIFARResDropResNet(nn.Module):
    """
    ResDrop-ResNet model for CIFAR from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARResDropResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        # Flat index over all units, used to pick the per-unit life probability.
        unit_idx = 0
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for local_idx, out_channels in enumerate(stage_channels):
                stride = 2 if (local_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(local_idx + 1), ResDropResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    life_prob=life_probs[unit_idx]))
                in_channels = out_channels
                unit_idx += 1
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all convolution weights; zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.flatten(start_dim=1)
        x = self.output(x)
        return x
def get_resdropresnet_cifar(classes,
                            blocks,
                            bottleneck,
                            model_name=None,
                            pretrained=False,
                            root=os.path.join("~", ".torch", "models"),
                            **kwargs):
    """
    Create ResDrop-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Three stages of equal depth; bottleneck units hold 3 convs, simple ones 2.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    layers = [(blocks - 2) // divisor] * 3

    init_block_channels = 16
    expansion = 4 if bottleneck else 1
    channels = [[ci * expansion] * li
                for (ci, li) in zip([16, 32, 64], layers)]

    # Life probability decays linearly with depth, reaching 0.5 at the last unit.
    total_units = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - float(k) / float(total_units) * final_death_prob
                  for k in range(1, total_units + 1)]

    net = CIFARResDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        num_classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resdropresnet20_cifar10(classes=10, **kwargs):
    """
    ResDrop-ResNet-20 model for CIFAR-10 from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_cifar10",
        **kwargs)
def resdropresnet20_cifar100(classes=100, **kwargs):
    """
    ResDrop-ResNet-20 model for CIFAR-100 from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_cifar100",
        **kwargs)
def resdropresnet20_svhn(classes=10, **kwargs):
    """
    ResDrop-ResNet-20 model for SVHN from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every model variant: parameter count and output shape."""
    import torch

    pretrained = False

    # (factory, number of classes, expected trainable-parameter count)
    configs = [
        (resdropresnet20_cifar10, 10, 272474),
        (resdropresnet20_cifar100, 100, 278324),
        (resdropresnet20_svhn, 10, 272474),
    ]

    for model, num_classes, expected_count in configs:
        net = model(pretrained=pretrained)

        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = torch.randn(14, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (14, num_classes))


if __name__ == "__main__":
    _test()
| 9,918
| 31.735974
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/bisenet.py
|
"""
BiSeNet for CelebAMask-HQ, implemented in PyTorch.
Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1808.00897.
"""
__all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq']
import os
import torch
import torch.nn as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .resnet import resnet18
class PyramidPoolingZeroBranch(nn.Module):
    """
    Pyramid pooling zero branch: squeeze to global context, project, then
    upsample back to the target resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int
        Spatial size of output image for the upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size):
        super(PyramidPoolingZeroBranch, self).__init__()
        self.in_size = in_size

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels)
        self.up = InterpolationBlock(
            scale_factor=None,
            mode="nearest",
            align_corners=None)

    def forward(self, x):
        # Fall back to the input's own spatial size when none was fixed.
        out_size = x.shape[2:] if self.in_size is None else self.in_size
        y = self.pool(x)
        y = self.conv(y)
        return self.up(y, size=out_size)
class AttentionRefinementBlock(nn.Module):
    """
    Attention refinement block: a 3x3 projection re-weighted channel-wise by a
    sigmoid gate computed from its own global average.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(AttentionRefinementBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.conv2 = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            activation=(lambda: nn.Sigmoid()))

    def forward(self, x):
        y = self.conv1(x)
        gate = self.conv2(self.pool(y))
        return y * gate
class PyramidPoolingMainBranch(nn.Module):
    """
    Pyramid pooling main branch: refine, add the coarser-level output, upsample
    and smooth with a 3x3 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : float
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor):
        super(PyramidPoolingMainBranch, self).__init__()
        self.att = AttentionRefinementBlock(
            in_channels=in_channels,
            out_channels=out_channels)
        self.up = InterpolationBlock(
            scale_factor=scale_factor,
            mode="nearest",
            align_corners=None)
        self.conv = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels)

    def forward(self, x, y):
        z = self.att(x) + y
        z = self.up(z)
        return self.conv(z)
class FeatureFusion(nn.Module):
    """
    Feature fusion block: concatenate two inputs, merge them, and add a
    squeeze-and-excitation-style re-weighted copy on top.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 4
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=4):
        super(FeatureFusion, self).__init__()
        mid_channels = out_channels // reduction

        self.conv_merge = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = conv1x1(
            in_channels=out_channels,
            out_channels=mid_channels)
        self.activ = nn.ReLU(inplace=True)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y):
        z = self.conv_merge(torch.cat((x, y), dim=1))
        # Channel attention from the merged features' global average.
        w = self.sigmoid(self.conv2(self.activ(self.conv1(self.pool(z)))))
        return z + z * w
class PyramidPooling(nn.Module):
    """
    Pyramid Pooling module: fuses the stride-8/16/32 backbone outputs into a
    stride-8 result plus two intermediate maps for auxiliary supervision.

    Parameters:
    ----------
    x16_in_channels : int
        Number of input channels for x16.
    x32_in_channels : int
        Number of input channels for x32.
    y_out_channels : int
        Number of output channels for y-outputs.
    y32_out_size : tuple of 2 int
        Spatial size of the y32 tensor.
    """
    def __init__(self,
                 x16_in_channels,
                 x32_in_channels,
                 y_out_channels,
                 y32_out_size):
        super(PyramidPooling, self).__init__()
        self.pool32 = PyramidPoolingZeroBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            in_size=y32_out_size)
        self.pool16 = PyramidPoolingMainBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            scale_factor=2)
        self.pool8 = PyramidPoolingMainBranch(
            in_channels=x16_in_channels,
            out_channels=y_out_channels,
            scale_factor=2)
        # The fusion input is x8 concatenated with y8, hence twice the channels.
        self.fusion = FeatureFusion(
            in_channels=(2 * y_out_channels),
            out_channels=(2 * y_out_channels))

    def forward(self, x8, x16, x32):
        y32 = self.pool32(x32)
        y16 = self.pool16(x32, y32)
        y8 = self.pool8(x16, y16)
        z8 = self.fusion(x8, y8)
        return z8, y8, y16
class BiSeHead(nn.Module):
    """
    BiSeNet head (final) block: a 3x3 conv followed by a 1x1 classifier.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels):
        super(BiSeHead, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class BiSeNet(nn.Module):
    """
    BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    backbone : func -> (nn.Sequential, list of int)
        Feature extractor constructor; must return the extractor together with
        the channel counts of its three tapped outputs.
    aux : bool, default True
        Whether to output an auxiliary results.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 480)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of classification classes.
    """
    def __init__(self,
                 backbone,
                 aux=True,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(640, 480),
                 num_classes=19):
        super(BiSeNet, self).__init__()
        assert (in_channels == 3)
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size

        # The backbone factory returns the extractor and the channel counts of
        # its three multi-scale outputs (presumably strides 8/16/32 — see the
        # forward assertion on divisibility by 32).
        self.backbone, backbone_out_channels = backbone()
        y_out_channels = backbone_out_channels[0]
        z_out_channels = 2 * y_out_channels
        # Precompute the 1/32-scale size only when the input size is fixed;
        # otherwise the interpolation size is resolved at run time.
        y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None

        self.pool = PyramidPooling(
            x16_in_channels=backbone_out_channels[1],
            x32_in_channels=backbone_out_channels[2],
            y_out_channels=y_out_channels,
            y32_out_size=y32_out_size)
        # Main (stride-8) segmentation head, upsampled x8 to input resolution.
        self.head_z8 = BiSeHead(
            in_channels=z_out_channels,
            mid_channels=z_out_channels,
            out_channels=num_classes)
        self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None))

        if self.aux:
            # Two auxiliary heads on the intermediate pyramid outputs.
            mid_channels = y_out_channels // 2
            self.head_y8 = BiSeHead(
                in_channels=y_out_channels,
                mid_channels=mid_channels,
                out_channels=num_classes)
            self.head_y16 = BiSeHead(
                in_channels=y_out_channels,
                mid_channels=mid_channels,
                out_channels=num_classes)
            self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None))

        self._init_params()

    def _init_params(self):
        # Kaiming-normal init (a=1) for convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, a=1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Backbone taps require spatial dims divisible by 32.
        assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0)
        x8, x16, x32 = self.backbone(x)
        z8, y8, y16 = self.pool(x8, x16, x32)

        z8 = self.head_z8(z8)
        z8 = self.up8(z8)
        if self.aux:
            y8 = self.head_y8(y8)
            y16 = self.head_y16(y16)
            y8 = self.up8(y8)
            y16 = self.up16(y16)
            # Main output first, then the two auxiliary outputs.
            return z8, y8, y16
        else:
            return z8
def get_bisenet(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create BiSeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = BiSeNet(**kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, num_classes=19, **kwargs):
    """
    BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation
    Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    def backbone():
        features_raw = resnet18(pretrained=pretrained_backbone).features
        # Drop the trailing pooling stage of the classifier backbone.
        del features_raw[-1]
        features = MultiOutputSequential(return_last=False)
        features.add_module("init_block", features_raw[0])
        for idx, stage in enumerate(features_raw[1:], start=1):
            if idx > 1:
                # Tap outputs from the second stage onwards.
                stage.do_output = True
            features.add_module("stage{}".format(idx), stage)
        return features, [128, 256, 512]

    return get_bisenet(backbone=backbone, num_classes=num_classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test BiSeNet: parameter count and output shape."""
    import torch

    in_size = (640, 480)
    aux = True
    pretrained = False

    for model in [bisenet_resnet18_celebamaskhq]:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)

        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        expected = 13300416 if aux else 13150272
        assert (model != bisenet_resnet18_celebamaskhq or weight_count == expected)

        batch = 1
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        ys = net(x)
        y = ys[0] if aux else ys
        # y.sum().backward()
        assert (tuple(y.size()) == (batch, 19, in_size[0], in_size[1]))


if __name__ == "__main__":
    _test()
| 13,181
| 28.959091
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resnet.py
|
"""
ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock']
import os
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class ResBlock(nn.Module):
    """
    Simple ResNet block for residual path in ResNet unit.

    Two stacked 3x3 convolution blocks; the second one omits the activation
    so that the enclosing unit can apply ReLU after the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bias=False,
                 use_bn=True):
        super(ResBlock, self).__init__()
        # The first conv carries the stride (and the channel change).
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bias=bias,
            use_bn=use_bn)
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            activation=None)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class ResBottleneck(nn.Module):
    """
    ResNet bottleneck block for residual path in ResNet unit
    (1x1 reduce -> 3x3 -> 1x1 expand).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 conv1_stride=False,
                 bottleneck_factor=4):
        super(ResBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        # The stride lives either in the 1x1 reduction conv or in the 3x3
        # conv, depending on `conv1_stride` (plain vs. "b" ResNet variants).
        stride1 = stride if conv1_stride else 1
        stride2 = 1 if conv1_stride else stride
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride1)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride2,
            padding=padding,
            dilation=dilation)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class ResUnit(nn.Module):
    """
    ResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bottleneck=True,
                 conv1_stride=False):
        super(ResUnit, self).__init__()
        # A projection shortcut is needed whenever the channel count or the
        # spatial resolution changes along the residual branch.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=bias,
                use_bn=use_bn)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=bias,
                use_bn=use_bn,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class ResInitBlock(nn.Module):
    """
    ResNet specific initial block: a strided 7x7 convolution followed by a
    strided 3x3 max-pooling (overall x4 spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ResInitBlock, self).__init__()
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.conv(x))
class ResNet(nn.Module):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.flatten(1)
        return self.output(x)
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depth -> per-stage unit counts. Depths 14, 26 and 38 have layouts that
    # depend on the block flavour, so they are resolved before the table.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    else:
        depth_to_layers = {
            10: [1, 1, 1, 1],
            12: [2, 1, 1, 1],
            16: [2, 2, 2, 1],
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
        }
        if blocks not in depth_to_layers:
            raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
        layers = depth_to_layers[blocks]

    # Sanity check: units * convs-per-unit + (init conv + classifier) == depth.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last
        # stage, which keeps its original channel count.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resnet10(**kwargs):
    """
    ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet10", blocks=10, **kwargs)


def resnet12(**kwargs):
    """
    ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet12", blocks=12, **kwargs)


def resnet14(**kwargs):
    """
    ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet14", blocks=14, **kwargs)


def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnetbc14b", blocks=14, bottleneck=True, conv1_stride=False, **kwargs)


def resnet16(**kwargs):
    """
    ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet16", blocks=16, **kwargs)


def resnet18_wd4(**kwargs):
    """
    ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_wd4", blocks=18, width_scale=0.25, **kwargs)


def resnet18_wd2(**kwargs):
    """
    ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_wd2", blocks=18, width_scale=0.5, **kwargs)


def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)


def resnet18(**kwargs):
    """
    ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18", blocks=18, **kwargs)


def resnet26(**kwargs):
    """
    ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet26", blocks=26, bottleneck=False, **kwargs)


def resnetbc26b(**kwargs):
    """
    ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)


def resnet34(**kwargs):
    """
    ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet34", blocks=34, **kwargs)


def resnetbc38b(**kwargs):
    """
    ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)


def resnet50(**kwargs):
    """
    ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet50", blocks=50, **kwargs)


def resnet50b(**kwargs):
    """
    ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet50b", blocks=50, conv1_stride=False, **kwargs)


def resnet101(**kwargs):
    """
    ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet101", blocks=101, **kwargs)


def resnet101b(**kwargs):
    """
    ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet101b", blocks=101, conv1_stride=False, **kwargs)


def resnet152(**kwargs):
    """
    ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet152", blocks=152, **kwargs)


def resnet152b(**kwargs):
    """
    ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet152b", blocks=152, conv1_stride=False, **kwargs)


def resnet200(**kwargs):
    """
    ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet200", blocks=200, **kwargs)


def resnet200b(**kwargs):
    """
    ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet200b", blocks=200, conv1_stride=False, **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    total = 0
    for p in net.parameters():
        if p.requires_grad:
            total += np.prod(p.size())
    return total
def _test():
    """Smoke-test every ResNet variant: parameter count, forward and backward pass."""
    import torch

    pretrained = False

    # Expected trainable-parameter count for each variant (also serves as the
    # list of models to exercise; dict preserves insertion order).
    expected_widths = {
        resnet10: 5418792,
        resnet12: 5492776,
        resnet14: 5788200,
        resnetbc14b: 10064936,
        resnet16: 6968872,
        resnet18_wd4: 3937400,
        resnet18_wd2: 5804296,
        resnet18_w3d4: 8476056,
        resnet18: 11689512,
        resnet26: 17960232,
        resnetbc26b: 15995176,
        resnet34: 21797672,
        resnetbc38b: 21925416,
        resnet50: 25557032,
        resnet50b: 25557032,
        resnet101: 44549160,
        resnet101b: 44549160,
        resnet152: 60192808,
        resnet152b: 60192808,
        resnet200: 64673832,
        resnet200b: 64673832,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        batch = 4
        x = torch.randn(batch, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (batch, 1000)


if __name__ == "__main__":
    _test()
| 25,346
| 31.579692
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/simpleposemobile_coco.py
|
"""
SimplePose(Mobile) for COCO Keypoint, implemented in PyTorch.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco',
'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco',
'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco']
import os
import torch
import torch.nn as nn
from .common import conv1x1, DucBlock, HeatmapMaxDetBlock
from .resnet import resnet18, resnet50b
from .mobilenet import mobilenet_w1
from .mobilenetv2 import mobilenetv2b_w1
from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1
class SimplePoseMobile(nn.Module):
    """
    SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,'
    https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    decoder_init_block_channels : int
        Number of output channels for the initial unit of the decoder.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 decoder_init_block_channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17):
        super(SimplePoseMobile, self).__init__()
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap

        self.backbone = backbone

        # Decoder: 1x1 projection, a chain of x2 DUC upsampling units, then a
        # final 1x1 conv producing one heatmap per keypoint.
        self.decoder = nn.Sequential()
        in_channels = backbone_out_channels
        self.decoder.add_module("init_block", conv1x1(
            in_channels=in_channels,
            out_channels=decoder_init_block_channels))
        in_channels = decoder_init_block_channels
        for unit_idx, out_channels in enumerate(channels):
            self.decoder.add_module("unit{}".format(unit_idx + 1), DucBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                scale_factor=2))
            in_channels = out_channels
        self.decoder.add_module("final_block", conv1x1(
            in_channels=in_channels,
            out_channels=keypoints))

        self.heatmap_max_det = HeatmapMaxDetBlock()

        self._init_params()

    def _init_params(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        features = self.backbone(x)
        heatmap = self.decoder(features)
        if self.return_heatmap:
            return heatmap
        return self.heatmap_max_det(heatmap)
def get_simpleposemobile(backbone,
                         backbone_out_channels,
                         keypoints,
                         model_name=None,
                         pretrained=False,
                         root=os.path.join("~", ".torch", "models"),
                         **kwargs):
    """
    Create SimplePose(Mobile) model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed decoder configuration shared by all SimplePose(Mobile) variants.
    decoder_channels = [128, 64, 32]
    decoder_init_block_channels = 256

    net = SimplePoseMobile(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        decoder_init_block_channels=decoder_init_block_channels,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnet18(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=512, keypoints=keypoints,
                                model_name="simplepose_mobile_resnet18_coco", **kwargs)


def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = resnet50b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=2048, keypoints=keypoints,
                                model_name="simplepose_mobile_resnet50b_coco", **kwargs)


def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of 1.0 MobileNet-224 for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = mobilenet_w1(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=1024, keypoints=keypoints,
                                model_name="simplepose_mobile_mobilenet_w1_coco", **kwargs)


def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of 1.0 MobileNetV2b-224 for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = mobilenetv2b_w1(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=1280, keypoints=keypoints,
                                model_name="simplepose_mobile_mobilenetv2b_w1_coco", **kwargs)


def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of MobileNetV3 Small 224/1.0 for COCO Keypoint from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = mobilenetv3_small_w1(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=576, keypoints=keypoints,
                                model_name="simplepose_mobile_mobilenetv3_small_w1_coco", **kwargs)


def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    SimplePose(Mobile) model on the base of MobileNetV3 Large 224/1.0 for COCO Keypoint from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    features = mobilenetv3_large_w1(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simpleposemobile(backbone=features, backbone_out_channels=960, keypoints=keypoints,
                                model_name="simplepose_mobile_mobilenetv3_large_w1_coco", **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    total = 0
    for p in net.parameters():
        if p.requires_grad:
            total += np.prod(p.size())
    return total
def _test():
    """Smoke-test every SimplePose(Mobile) variant: parameter count and output shape."""
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False

    # Expected trainable-parameter count for each variant (also serves as the
    # list of models to exercise; dict preserves insertion order).
    expected_widths = {
        simplepose_mobile_resnet18_coco: 12858208,
        simplepose_mobile_resnet50b_coco: 25582944,
        simplepose_mobile_mobilenet_w1_coco: 5019744,
        simplepose_mobile_mobilenetv2b_w1_coco: 4102176,
        simplepose_mobile_mobilenetv3_small_w1_coco: 2625088,
        simplepose_mobile_mobilenetv3_large_w1_coco: 4768336,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
        net.eval()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (y.shape[0] == batch) and (y.shape[1] == keypoints)
        if return_heatmap:
            # Heatmaps come out at 1/4 of the input resolution.
            assert (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)
        else:
            # Keypoint mode returns (x, y, score) triples per keypoint.
            assert (y.shape[2] == 3)


if __name__ == "__main__":
    _test()
| 12,743
| 37.735562
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/cbamresnet.py
|
"""
CBAM-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv7x7_block
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(nn.Module):
    """
    Multilayer perceptron block: a bottleneck pair of fully-connected layers
    (channels -> channels // reduction_ratio -> channels).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16):
        super(MLP, self).__init__()
        mid_channels = channels // reduction_ratio
        self.fc1 = nn.Linear(
            in_features=channels,
            out_features=mid_channels)
        self.activ = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(
            in_features=mid_channels,
            out_features=channels)

    def forward(self, x):
        # Collapse any trailing spatial dims before the FC pair.
        flat = x.view(x.size(0), -1)
        return self.fc2(self.activ(self.fc1(flat)))
class ChannelGate(nn.Module):
    """
    CBAM channel attention gate.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16):
        super(ChannelGate, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.max_pool = nn.AdaptiveMaxPool2d(output_size=(1, 1))
        self.mlp = MLP(
            channels=channels,
            reduction_ratio=reduction_ratio)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # One shared MLP scores both pooled descriptors; their sum is gated.
        avg_att = self.mlp(self.avg_pool(x))
        max_att = self.mlp(self.max_pool(x))
        gate = self.sigmoid(avg_att + max_att)
        gate = gate.unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * gate
class SpatialGate(nn.Module):
    """
    CBAM spatial gate block: attends over spatial positions using the
    channel-wise max and mean maps.
    """
    def __init__(self):
        super(SpatialGate, self).__init__()
        self.conv = conv7x7_block(
            in_channels=2,
            out_channels=1,
            activation=None)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Stack the channel-wise max and mean maps into a 2-channel descriptor.
        max_map = x.max(dim=1)[0].unsqueeze(1)
        mean_map = x.mean(dim=1).unsqueeze(1)
        att = self.sigmoid(self.conv(torch.cat((max_map, mean_map), dim=1)))
        return x * att
class CbamBlock(nn.Module):
    """
    CBAM attention block for CBAM-ResNet: channel gate followed by spatial
    gate, as in the original paper's ordering.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16):
        super(CbamBlock, self).__init__()
        self.ch_gate = ChannelGate(
            channels=channels,
            reduction_ratio=reduction_ratio)
        self.sp_gate = SpatialGate()

    def forward(self, x):
        return self.sp_gate(self.ch_gate(x))
class CbamResUnit(nn.Module):
    """
    CBAM-ResNet unit: residual body with CBAM attention applied to the
    branch output before the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck):
        super(CbamResUnit, self).__init__()
        # Project the identity branch whenever shape or stride changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                conv1_stride=False)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.cbam = CbamBlock(channels=out_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.cbam(self.body(x))
        return self.activ(out + identity)
class CbamResNet(nn.Module):
    """
    CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(CbamResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except stage 1.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), CbamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=unit_stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # He-uniform init for all convolution weights; zero for their biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create CBAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 18, 34, 50, 101, 152).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported depth.
    blocks_to_layers = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]

    init_block_channels = 64
    # Depths below 50 use basic residual blocks; deeper nets use bottlenecks
    # whose stage outputs are 4x wider.
    bottleneck = (blocks >= 50)
    channels_per_layers = [256, 512, 1024, 2048] if bottleneck else [64, 128, 256, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = CbamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def cbam_resnet18(**kwargs):
    """
    CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=18,
        model_name="cbam_resnet18",
        **kwargs)
def cbam_resnet34(**kwargs):
    """
    CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=34,
        model_name="cbam_resnet34",
        **kwargs)
def cbam_resnet50(**kwargs):
    """
    CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=50,
        model_name="cbam_resnet50",
        **kwargs)
def cbam_resnet101(**kwargs):
    """
    CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=101,
        model_name="cbam_resnet101",
        **kwargs)
def cbam_resnet152(**kwargs):
    """
    CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=152,
        model_name="cbam_resnet152",
        **kwargs)
def _calc_width(net):
    """Return the total element count of all trainable parameters of *net*."""
    import numpy as np
    weight_count = 0
    for param in net.parameters():
        if param.requires_grad:
            weight_count += np.prod(param.size())
    return weight_count
def _test():
    """Smoke-test each CBAM-ResNet variant: parameter count and output shape."""
    import torch

    pretrained = False

    models = [
        cbam_resnet18,
        cbam_resnet34,
        cbam_resnet50,
        cbam_resnet101,
        cbam_resnet152,
    ]

    # Expected trainable-parameter counts per variant.
    expected_widths = {
        cbam_resnet18: 11779392,
        cbam_resnet34: 21960468,
        cbam_resnet50: 28089624,
        cbam_resnet101: 49330172,
        cbam_resnet152: 66826848,
    }

    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_widths[model])
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 12,908
| 28.405467
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/diracnetv2.py
|
"""
DiracNetV2 for ImageNet-1K, implemented in PyTorch.
Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
"""
__all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2']
import os
import torch.nn as nn
import torch.nn.init as init
class DiracConv(nn.Module):
    """
    DiracNetV2 specific convolution block with pre-activation (ReLU applied
    before the convolution).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding):
        super(DiracConv, self).__init__()
        self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=True)

    def forward(self, x):
        # Pre-activation ordering: activation first, then convolution.
        return self.conv(self.activ(x))
def dirac_conv3x3(in_channels,
                  out_channels):
    """
    Build the 3x3 variant of the DiracNetV2 specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    # Fixed 3x3 kernel, unit stride, same-padding.
    return DiracConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=1,
        padding=1)
class DiracInitBlock(nn.Module):
    """
    DiracNetV2 specific initial block: strided 7x7 convolution followed by
    a strided 3x3 max-pool.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(DiracInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        # Two stride-2 stages: overall 4x spatial reduction.
        return self.pool(self.conv(x))
class DiracNetV2(nn.Module):
    """
    DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
    https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(DiracNetV2, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", DiracInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        last_stage_idx = len(channels) - 1
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(unit_idx + 1), dirac_conv3x3(
                    in_channels=in_channels,
                    out_channels=out_channels))
                in_channels = out_channels
            # Every stage except the last ends with a 2x2 max-pool downsample.
            if stage_idx != last_stage_idx:
                stage.add_module("pool{}".format(stage_idx + 1), nn.MaxPool2d(
                    kernel_size=2,
                    stride=2,
                    padding=0))
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        # Units are pre-activated, so a final ReLU is needed before pooling.
        self.features.add_module("final_activ", nn.ReLU(inplace=True))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # He-uniform init for convolution weights; zero for their biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_diracnetv2(blocks,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create DiracNetV2 model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    blocks_to_layers = {
        18: [4, 4, 4, 4],
        34: [6, 8, 12, 6],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]

    channels_per_layers = [64, 128, 256, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    init_block_channels = 64

    net = DiracNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def diracnet18v2(**kwargs):
    """
    DiracNetV2-18 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
    https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(
        blocks=18,
        model_name="diracnet18v2",
        **kwargs)
def diracnet34v2(**kwargs):
    """
    DiracNetV2-34 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
    https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(
        blocks=34,
        model_name="diracnet34v2",
        **kwargs)
def _calc_width(net):
    """Return the total element count of all trainable parameters of *net*."""
    import numpy as np
    weight_count = 0
    for param in net.parameters():
        if param.requires_grad:
            weight_count += np.prod(param.size())
    return weight_count
def _test():
    """Smoke-test each DiracNetV2 variant: parameter count and output shape."""
    import torch

    pretrained = False

    models = [
        diracnet18v2,
        diracnet34v2,
    ]

    # Expected trainable-parameter counts per variant.
    expected_widths = {
        diracnet18v2: 11511784,
        diracnet34v2: 21616232,
    }

    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_widths[model])
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 8,444
| 27.72449
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/sepreresnet_cifar.py
|
"""
SE-PreResNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn',
'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn',
'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn',
'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn',
'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn',
'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn',
'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn',
'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block
from .sepreresnet import SEPreResUnit
class CIFARSEPreResNet(nn.Module):
    """
    SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARSEPreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), SEPreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=False))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform init for convolution weights; zero for their biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_sepreresnet_cifar(num_classes,
                          blocks,
                          bottleneck,
                          model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".torch", "models"),
                          **kwargs):
    """
    Create SE-PreResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])

    # Depth must fit the 3-stage layout: (blocks - 2) units split evenly,
    # with 3 convs per bottleneck unit or 2 per simple unit.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if bottleneck:
        # Bottleneck units output 4x the per-stage base width.
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARSEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def sepreresnet20_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False,
                                 model_name="sepreresnet20_cifar10", **kwargs)
def sepreresnet20_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False,
                                 model_name="sepreresnet20_cifar100", **kwargs)
def sepreresnet20_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_svhn",
                                 **kwargs)
def sepreresnet56_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False,
                                 model_name="sepreresnet56_cifar10", **kwargs)
def sepreresnet56_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False,
                                 model_name="sepreresnet56_cifar100", **kwargs)
def sepreresnet56_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_svhn",
                                 **kwargs)
def sepreresnet110_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False,
                                 model_name="sepreresnet110_cifar10", **kwargs)
def sepreresnet110_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False,
                                 model_name="sepreresnet110_cifar100", **kwargs)
def sepreresnet110_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False,
                                 model_name="sepreresnet110_svhn", **kwargs)
def sepreresnet164bn_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True,
                                 model_name="sepreresnet164bn_cifar10", **kwargs)
def sepreresnet164bn_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True,
                                 model_name="sepreresnet164bn_cifar100", **kwargs)
def sepreresnet164bn_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True,
                                 model_name="sepreresnet164bn_svhn", **kwargs)
def sepreresnet272bn_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True,
                                 model_name="sepreresnet272bn_cifar10", **kwargs)
def sepreresnet272bn_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True,
                                 model_name="sepreresnet272bn_cifar100", **kwargs)
def sepreresnet272bn_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True,
                                 model_name="sepreresnet272bn_svhn", **kwargs)
def sepreresnet542bn_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True,
                                 model_name="sepreresnet542bn_cifar10", **kwargs)
def sepreresnet542bn_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True,
                                 model_name="sepreresnet542bn_cifar100", **kwargs)
def sepreresnet542bn_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True,
                                 model_name="sepreresnet542bn_svhn", **kwargs)
def sepreresnet1001_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True,
                                 model_name="sepreresnet1001_cifar10", **kwargs)
def sepreresnet1001_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True,
                                 model_name="sepreresnet1001_cifar100", **kwargs)
def sepreresnet1001_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True,
                                 model_name="sepreresnet1001_svhn", **kwargs)
def sepreresnet1202_cifar10(num_classes=10, **kwargs):
    """
    SE-PreResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_cifar10",
        **kwargs)
def sepreresnet1202_cifar100(num_classes=100, **kwargs):
    """
    SE-PreResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_cifar100",
        **kwargs)
def sepreresnet1202_svhn(num_classes=10, **kwargs):
    """
    SE-PreResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SE-PreResNet CIFAR/SVHN variant: parameter count and output shape."""
    import torch

    pretrained = False

    # (factory, number of classes, expected trainable-parameter count)
    configs = [
        (sepreresnet20_cifar10, 10, 274559),
        (sepreresnet20_cifar100, 100, 280409),
        (sepreresnet20_svhn, 10, 274559),
        (sepreresnet56_cifar10, 10, 862601),
        (sepreresnet56_cifar100, 100, 868451),
        (sepreresnet56_svhn, 10, 862601),
        (sepreresnet110_cifar10, 10, 1744664),
        (sepreresnet110_cifar100, 100, 1750514),
        (sepreresnet110_svhn, 10, 1744664),
        (sepreresnet164bn_cifar10, 10, 1904882),
        (sepreresnet164bn_cifar100, 100, 1928012),
        (sepreresnet164bn_svhn, 10, 1904882),
        (sepreresnet272bn_cifar10, 10, 3152450),
        (sepreresnet272bn_cifar100, 100, 3175580),
        (sepreresnet272bn_svhn, 10, 3152450),
        (sepreresnet542bn_cifar10, 10, 6271370),
        (sepreresnet542bn_cifar100, 100, 6294500),
        (sepreresnet542bn_svhn, 10, 6271370),
        (sepreresnet1001_cifar10, 10, 11573534),
        (sepreresnet1001_cifar100, 100, 11596664),
        (sepreresnet1001_svhn, 10, 11573534),
        (sepreresnet1202_cifar10, 10, 19581938),
        (sepreresnet1202_cifar100, 100, 19587788),
        (sepreresnet1202_svhn, 10, 19581938),
    ]

    for model, num_classes, expected_width in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_width)
        # Forward/backward pass on a CIFAR-sized input.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
| 24,663
| 37.298137
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/danet.py
|
"""
DANet for image segmentation, implemented in Gluon.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes', 'ScaleBlock']
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(nn.Module):
    """
    Simple scale block: multiplies its input by a single learnable scalar.

    The parameter is created as a one-element tensor initialized to 1, so the
    block starts out as an identity mapping.
    """
    def __init__(self):
        super(ScaleBlock, self).__init__()
        # torch.Tensor((1,)) constructs a one-element tensor with value 1.0.
        self.alpha = Parameter(torch.Tensor((1,)))

    def forward(self, x):
        return self.alpha * x

    def __repr__(self):
        # Bug fix: the format template names the field `{alpha}`, but the
        # keyword was previously passed as `gamma=`, so calling repr() raised
        # a KeyError. The keyword must match the field name.
        s = '{name}(alpha={alpha})'
        return s.format(
            name=self.__class__.__name__,
            alpha=self.alpha.shape[0])

    def calc_flops(self, x):
        """Return (num_flops, num_macs) for a single-sample input `x`."""
        assert (x.shape[0] == 1)
        num_flops = x.numel()  # one multiply per element
        num_macs = 0
        return num_flops, num_macs
class PosAttBlock(nn.Module):
    """
    Position attention block from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983. It captures long-range spatial contextual
    information via self-attention over spatial positions.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 8
        Squeeze reduction value.
    """
    def __init__(self,
                 channels,
                 reduction=8):
        super(PosAttBlock, self).__init__()
        mid_channels = channels // reduction

        self.query_conv = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.key_conv = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.value_conv = conv1x1(
            in_channels=channels,
            out_channels=channels,
            bias=True)
        self.scale = ScaleBlock()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, height, width = x.shape
        hw = height * width
        # Flatten spatial dims for batched matrix products.
        q = self.query_conv(x).view((batch, -1, hw))
        k = self.key_conv(x).view((batch, -1, hw))
        v = self.value_conv(x).view((batch, -1, hw))
        # Pairwise affinity between spatial positions: (batch, HW, HW).
        energy = q.transpose(1, 2).contiguous().bmm(k)
        w = self.softmax(energy)
        y = v.bmm(w.transpose(1, 2).contiguous())
        y = y.reshape((batch, -1, height, width))
        # Learnable-scale residual blend with the input.
        return self.scale(y) + x
class ChaAttBlock(nn.Module):
    """
    Channel attention block from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.
    It explicitly models interdependencies between channels.
    """
    def __init__(self):
        super(ChaAttBlock, self).__init__()
        self.scale = ScaleBlock()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, height, width = x.shape
        proj_query = x.view((batch, -1, height * width))
        proj_key = x.view((batch, -1, height * width))
        proj_value = x.view((batch, -1, height * width))
        # Channel-to-channel affinity matrix: (batch, C, C).
        energy = proj_query.bmm(proj_key.transpose(1, 2).contiguous())
        # Fix: use the canonical `keepdim` keyword; the numpy-style `keepdims`
        # alias is not accepted by all PyTorch versions for Tensor.max.
        energy_max, _ = energy.max(dim=-1, keepdim=True)
        # Subtracting from the per-row max inverts the affinities before
        # softmax (as in the reference implementation).
        energy_new = energy_max.expand_as(energy) - energy
        w = self.softmax(energy_new)
        y = w.bmm(proj_value)
        y = y.reshape((batch, -1, height, width))
        y = self.scale(y) + x
        return y
class DANetHeadBranch(nn.Module):
    """
    DANet head branch: bottleneck conv -> attention -> conv, plus a dropout'ed
    1x1 classifier projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pose_att : bool, default True
        Whether to use position attention instead of channel one.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pose_att=True):
        super(DANetHeadBranch, self).__init__()
        mid_channels = in_channels // 4
        dropout_rate = 0.1

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.att = PosAttBlock(mid_channels) if pose_att else ChaAttBlock()
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)
        self.dropout = nn.Dropout(p=dropout_rate, inplace=False)

    def forward(self, x):
        x = self.conv1(x)
        x = self.att(x)
        # `y` (pre-projection features) is also returned so the head can fuse
        # the two branches before its own classifier.
        y = self.conv2(x)
        x = self.dropout(self.conv3(y))
        return x, y
class DANetHead(nn.Module):
    """
    DANet head block: runs position- and channel-attention branches in
    parallel and fuses their features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(DANetHead, self).__init__()
        mid_channels = in_channels // 4
        dropout_rate = 0.1

        self.branch_pa = DANetHeadBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            pose_att=True)
        self.branch_ca = DANetHeadBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            pose_att=False)
        self.conv = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)
        self.dropout = nn.Dropout(p=dropout_rate, inplace=False)

    def forward(self, x):
        pa_x, pa_y = self.branch_pa(x)
        ca_x, ca_y = self.branch_ca(x)
        # Sum the two branches' features, then project to class scores.
        fused = pa_y + ca_y
        out = self.dropout(self.conv(fused))
        # Per-branch outputs are returned for auxiliary supervision.
        return out, pa_x, ca_x
class DANet(nn.Module):
    """
    DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 num_classes=19):
        super(DANet, self).__init__()
        assert (in_channels > 0)
        # Both spatial dims must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size

        self.backbone = backbone
        self.head = DANetHead(
            in_channels=backbone_out_channels,
            out_channels=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform for every convolution, zero bias.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone returns a tuple; only the first element is used here.
        x, _ = self.backbone(x)
        x, y, z = self.head(x)
        x = F.interpolate(x, size=in_size, mode="bilinear", align_corners=True)
        if not self.aux:
            return x
        # Auxiliary per-branch outputs, upsampled the same way.
        y = F.interpolate(y, size=in_size, mode="bilinear", align_corners=True)
        z = F.interpolate(z, size=in_size, mode="bilinear", align_corners=True)
        return x, y, z
def get_danet(backbone,
              num_classes,
              aux=False,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create DANet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    num_classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = DANet(
        backbone=backbone,
        num_classes=num_classes,
        aux=aux,
        **kwargs)

    if pretrained:
        # Loading weights requires a non-empty model name.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    # Drop the final block of the backbone before attaching the DANet head.
    del backbone[-1]
    return get_danet(
        backbone=backbone,
        num_classes=num_classes,
        aux=aux,
        model_name="danet_resnetd50b_cityscapes",
        **kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    # Drop the final block of the backbone before attaching the DANet head.
    del backbone[-1]
    return get_danet(
        backbone=backbone,
        num_classes=num_classes,
        aux=aux,
        model_name="danet_resnetd101b_cityscapes",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test DANet variants: parameter count and output shape."""
    import torch

    in_size = (480, 480)
    aux = True
    pretrained = False

    expected_widths = {
        danet_resnetd50b_cityscapes: 47586427,
        danet_resnetd101b_cityscapes: 66578555,
    }

    for model in expected_widths:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_widths[model])
        batch = 2
        num_classes = 19
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        ys = net(x)
        # With aux=True the net returns a tuple; score map comes first.
        y = ys[0] if aux else ys
        y.sum().backward()
        assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and
                (y.size(3) == x.size(3)))
| 12,721
| 30.568238
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/mobilenetv2.py
|
"""
MobileNetV2 for ImageNet-1K, implemented in PyTorch.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1',
'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block
class LinearBottleneck(nn.Module):
    """
    So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit:
    optional 1x1 expansion -> depthwise 3x3 -> 1x1 linear projection, with a
    residual connection when shapes allow it.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    expansion : bool
        Whether do expansion of channels.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expansion,
                 remove_exp_conv):
        super(LinearBottleneck, self).__init__()
        # Residual only when the unit preserves both channels and resolution.
        self.residual = (in_channels == out_channels) and (stride == 1)
        mid_channels = in_channels * 6 if expansion else in_channels
        self.use_exp_conv = (expansion or (not remove_exp_conv))

        if self.use_exp_conv:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation="relu6")
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            activation="relu6")
        # Linear (no-activation) projection, hence the unit's name.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        out = self.conv1(x) if self.use_exp_conv else x
        out = self.conv2(out)
        out = self.conv3(out)
        if self.residual:
            out = out + identity
        return out
class MobileNetV2(nn.Module):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 remove_exp_conv,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MobileNetV2, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            activation="relu6"))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # The first unit of every stage except the first downsamples.
                stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                # The very first unit of the network skips channel expansion.
                expansion = (stage_idx != 0) or (unit_idx != 0)
                stage.add_module("unit{}".format(unit_idx + 1), LinearBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    expansion=expansion,
                    remove_exp_conv=remove_exp_conv))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            activation="relu6"))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        # 1x1 convolution acts as the (bias-free) classifier head.
        self.output = conv1x1(
            in_channels=in_channels,
            out_channels=num_classes,
            bias=False)

        self._init_params()

    def _init_params(self):
        # He-uniform for every convolution, zero bias.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        return x.view(x.size(0), -1)
def get_mobilenetv2(width_scale,
                    remove_exp_conv=False,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create MobileNetV2 model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    remove_exp_conv : bool, default False
        Whether to remove expansion convolution.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 2, 3, 4, 3, 3, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

    # Group per-layer channel counts into stages: a downsampling layer opens
    # a new stage, otherwise units are appended to the current one.
    channels = [[]]
    for ch, num_units, ds in zip(channels_per_layers, layers, downsample):
        if ds != 0:
            channels.append([ch] * num_units)
        else:
            channels[-1] += [ch] * num_units

    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        # The final block only grows with the net; it is never shrunk.
        if width_scale > 1.0:
            final_block_channels = int(final_block_channels * width_scale)

    net = MobileNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        remove_exp_conv=remove_exp_conv,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def mobilenetv2_w1(**kwargs):
    """
    1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=1.0,
        model_name="mobilenetv2_w1",
        **kwargs)
def mobilenetv2_w3d4(**kwargs):
    """
    0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.75,
        model_name="mobilenetv2_w3d4",
        **kwargs)
def mobilenetv2_wd2(**kwargs):
    """
    0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.5,
        model_name="mobilenetv2_wd2",
        **kwargs)
def mobilenetv2_wd4(**kwargs):
    """
    0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.25,
        model_name="mobilenetv2_wd4",
        **kwargs)
def mobilenetv2b_w1(**kwargs):
    """
    1.0 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=1.0,
        remove_exp_conv=True,
        model_name="mobilenetv2b_w1",
        **kwargs)
def mobilenetv2b_w3d4(**kwargs):
    """
    0.75 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.75,
        remove_exp_conv=True,
        model_name="mobilenetv2b_w3d4",
        **kwargs)
def mobilenetv2b_wd2(**kwargs):
    """
    0.5 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.5,
        remove_exp_conv=True,
        model_name="mobilenetv2b_wd2",
        **kwargs)
def mobilenetv2b_wd4(**kwargs):
    """
    0.25 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(
        width_scale=0.25,
        remove_exp_conv=True,
        model_name="mobilenetv2b_wd4",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every MobileNetV2 variant: parameter count and output shape."""
    import torch

    pretrained = False

    expected_widths = {
        mobilenetv2_w1: 3504960,
        mobilenetv2_w3d4: 2627592,
        mobilenetv2_wd2: 1964736,
        mobilenetv2_wd4: 1516392,
        mobilenetv2b_w1: 3503872,
        mobilenetv2b_w3d4: 2626968,
        mobilenetv2b_wd2: 1964448,
        mobilenetv2b_wd4: 1516312,
    }

    for model in expected_widths:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_widths[model])
        # Forward/backward pass on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
| 12,761
| 32.321149
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/squeezenet.py
|
"""
SqueezeNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
class FireConv(nn.Module):
    """
    SqueezeNet specific convolution block: Conv2d followed by an in-place ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding):
        super(FireConv, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.conv(x))
class FireUnit(nn.Module):
    """
    SqueezeNet unit, so-called 'Fire' unit: a 1x1 squeeze convolution feeding
    parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    squeeze_channels : int
        Number of output channels for squeeze convolution blocks.
    expand1x1_channels : int
        Number of output channels for expand 1x1 convolution blocks.
    expand3x3_channels : int
        Number of output channels for expand 3x3 convolution blocks.
    residual : bool
        Whether use residual connection.
    """
    def __init__(self,
                 in_channels,
                 squeeze_channels,
                 expand1x1_channels,
                 expand3x3_channels,
                 residual):
        super(FireUnit, self).__init__()
        self.residual = residual

        self.squeeze = FireConv(
            in_channels=in_channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            padding=0)
        self.expand1x1 = FireConv(
            in_channels=squeeze_channels,
            out_channels=expand1x1_channels,
            kernel_size=1,
            padding=0)
        self.expand3x3 = FireConv(
            in_channels=squeeze_channels,
            out_channels=expand3x3_channels,
            kernel_size=3,
            padding=1)

    def forward(self, x):
        identity = x if self.residual else None
        s = self.squeeze(x)
        # Expand branches are concatenated along the channel axis.
        out = torch.cat((self.expand1x1(s), self.expand3x3(s)), dim=1)
        if self.residual:
            out = out + identity
        return out
class SqueezeInitBlock(nn.Module):
    """
    SqueezeNet specific initial block: a stride-2 convolution with ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size):
        super(SqueezeInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=2)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.conv(x))
class SqueezeNet(nn.Module):
    """
    SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    residuals : bool
        Whether to use residual units.
    init_block_kernel_size : int or tuple/list of 2 int
        The dimensions of the convolution window for the initial unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SqueezeNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", SqueezeInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            kernel_size=init_block_kernel_size))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            # Every stage starts with a max-pooling layer.
            stage.add_module("pool{}".format(stage_idx + 1), nn.MaxPool2d(
                kernel_size=3,
                stride=2,
                ceil_mode=True))
            for unit_idx, out_channels in enumerate(stage_channels):
                # Fire-unit channel split: half per expand branch, 1/8 squeeze.
                expand_channels = out_channels // 2
                squeeze_channels = out_channels // 8
                stage.add_module("unit{}".format(unit_idx + 1), FireUnit(
                    in_channels=in_channels,
                    squeeze_channels=squeeze_channels,
                    expand1x1_channels=expand_channels,
                    expand3x3_channels=expand_channels,
                    residual=((residuals is not None) and (residuals[stage_idx][unit_idx] == 1))))
                in_channels = out_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("dropout", nn.Dropout(p=0.5))

        # Classifier: 1x1 conv over the feature map, ReLU, global average pool.
        self.output = nn.Sequential()
        self.output.add_module("final_conv", nn.Conv2d(
            in_channels=in_channels,
            out_channels=num_classes,
            kernel_size=1))
        self.output.add_module("final_activ", nn.ReLU(inplace=True))
        self.output.add_module("final_pool", nn.AvgPool2d(
            kernel_size=13,
            stride=1))

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if not isinstance(module, nn.Conv2d):
                continue
            if 'final_conv' in name:
                # The classifier conv uses a Gaussian init instead of He.
                init.normal_(module.weight, mean=0.0, std=0.01)
            else:
                init.kaiming_uniform_(module.weight)
            if module.bias is not None:
                init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        return x.view(x.size(0), -1)
def get_squeezenet(version,
                   residual=False,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create SqueezeNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('1.0' or '1.1').
    residual : bool, default False
        Whether to use residual connections.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-version configuration:
    # (stage channels, residual flags, init-block kernel size, init-block channels)
    configs = {
        '1.0': ([[128, 128, 256], [256, 384, 384, 512], [512]],
                [[0, 1, 0], [1, 0, 1, 0], [1]],
                7, 96),
        '1.1': ([[128, 128], [256, 256], [384, 384, 512, 512]],
                [[0, 1], [0, 1], [0, 1, 0, 1]],
                3, 64),
    }
    if version not in configs:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    channels, residuals, init_block_kernel_size, init_block_channels = configs[version]
    if not residual:
        residuals = None

    net = SqueezeNet(
        channels=channels,
        residuals=residuals,
        init_block_kernel_size=init_block_kernel_size,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def squeezenet_v1_0(**kwargs):
    """
    Original ('vanilla') SqueezeNet model ('SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB
    model size,' https://arxiv.org/abs/1602.07360).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_squeezenet(model_name="squeezenet_v1_0", version="1.0", residual=False, **kwargs)
def squeezenet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 model ('SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_squeezenet(model_name="squeezenet_v1_1", version="1.1", residual=False, **kwargs)
def squeezeresnet_v1_0(**kwargs):
    """
    SqueezeNet v1.0 variant with residual connections ('SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
    and <0.5MB model size,' https://arxiv.org/abs/1602.07360).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_0", version="1.0", residual=True, **kwargs)
def squeezeresnet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 variant with residual connections ('SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
    and <0.5MB model size,' https://arxiv.org/abs/1602.07360).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_1", version="1.1", residual=True, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SqueezeNet builder: instantiate the model, verify its
    trainable-parameter count, run one forward/backward pass, and check the
    output shape."""
    import torch  # fix: torch.randn/backward below need torch in scope (was missing)
    pretrained = False
    models = [
        squeezenet_v1_0,
        squeezenet_v1_1,
        squeezeresnet_v1_0,
        squeezeresnet_v1_1,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference counts (residual connections add no parameters).
        assert (model != squeezenet_v1_0 or weight_count == 1248424)
        assert (model != squeezenet_v1_1 or weight_count == 1235496)
        assert (model != squeezeresnet_v1_0 or weight_count == 1248424)
        assert (model != squeezeresnet_v1_1 or weight_count == 1235496)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 12,164
| 30.929134
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/nin_cifar.py
|
"""
NIN for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Network In Network,' https://arxiv.org/abs/1312.4400.
"""
__all__ = ['CIFARNIN', 'nin_cifar10', 'nin_cifar100', 'nin_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
class NINConv(nn.Module):
    """
    NIN convolution unit: a biased convolution followed by an in-place ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0):
        super(NINConv, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=True)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        return self.activ(self.conv(x))
class CIFARNIN(nn.Module):
    """
    NIN model for CIFAR from 'Network In Network,' https://arxiv.org/abs/1312.4400.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_kernel_sizes : list of int
        Convolution window sizes for the first units in each stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 first_kernel_sizes,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARNIN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("stageN", "unitN", "poolN", ...) determine
        # state_dict keys; pretrained weights rely on exactly this layout.
        self.features = nn.Sequential()
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                if (j == 0) and (i != 0):
                    # Every stage after the first starts with stride-2
                    # downsampling (max pool for stage 2, average pool for
                    # later stages) followed by dropout.
                    if i == 1:
                        stage.add_module("pool{}".format(i + 1), nn.MaxPool2d(
                            kernel_size=3,
                            stride=2,
                            padding=1))
                    else:
                        stage.add_module("pool{}".format(i + 1), nn.AvgPool2d(
                            kernel_size=3,
                            stride=2,
                            padding=1))
                    stage.add_module("dropout{}".format(i + 1), nn.Dropout(p=0.5))
                # Only the first unit of a stage uses a large window; the
                # remaining units are 1x1 convolutions (NIN "MLP-conv" layers).
                kernel_size = first_kernel_sizes[i] if j == 0 else 1
                padding = (kernel_size - 1) // 2
                stage.add_module("unit{}".format(j + 1), NINConv(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    padding=padding))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Head: 1x1 conv to `num_classes` maps, then global 8x8 average
        # pooling (32x32 input shrinks to 8x8 after two stride-2 poolings).
        self.output = nn.Sequential()
        self.output.add_module("final_conv", NINConv(
            in_channels=in_channels,
            out_channels=num_classes,
            kernel_size=1))
        self.output.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self._init_params()
    def _init_params(self):
        """Kaiming-uniform init for all convolution weights; zero all biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        """Compute class scores, flattened to (batch, num_classes)."""
        x = self.features(x)
        x = self.output(x)
        x = x.view(x.size(0), -1)
        return x
def get_nin_cifar(num_classes,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create NIN model for CIFAR with specific parameters.
    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # The NIN topology is fixed for all CIFAR/SVHN variants: three stages of
    # MLP-conv units, stems of 5x5/5x5/3x3.
    net = CIFARNIN(
        channels=[[192, 160, 96], [192, 192, 192], [192, 192]],
        first_kernel_sizes=[5, 5, 3],
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def nin_cifar10(num_classes=10, **kwargs):
    """
    NIN model for CIFAR-10 ('Network In Network,' https://arxiv.org/abs/1312.4400).
    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_nin_cifar(model_name="nin_cifar10", num_classes=num_classes, **kwargs)
def nin_cifar100(num_classes=100, **kwargs):
    """
    NIN model for CIFAR-100 ('Network In Network,' https://arxiv.org/abs/1312.4400).
    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_nin_cifar(model_name="nin_cifar100", num_classes=num_classes, **kwargs)
def nin_svhn(num_classes=10, **kwargs):
    """
    NIN model for SVHN ('Network In Network,' https://arxiv.org/abs/1312.4400).
    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_nin_cifar(model_name="nin_svhn", num_classes=num_classes, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the CIFAR/SVHN NIN builders: parameter counts, forward
    shape, and one backward pass."""
    import torch
    pretrained = False
    # builder -> (num_classes, expected trainable-parameter count)
    cases = {
        nin_cifar10: (10, 966986),
        nin_cifar100: (100, 984356),
        nin_svhn: (10, 966986),
    }
    for model_fn, (num_classes, expected_width) in cases.items():
        net = model_fn(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == expected_width)
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
if __name__ == "__main__":
    _test()
| 8,048
| 29.957692
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/vgg.py
|
"""
VGG for ImageNet-1K, implemented in PyTorch.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block
class VGGDense(nn.Module):
    """
    Fully-connected VGG unit: linear layer -> in-place ReLU -> dropout(0.5).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(VGGDense, self).__init__()
        self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p=0.5)
    def forward(self, x):
        return self.dropout(self.activ(self.fc(x)))
class VGGOutputBlock(nn.Module):
    """
    VGG classifier head: two dropout-regularized 4096-unit dense layers
    followed by the final linear classifier.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 classes):
        super(VGGOutputBlock, self).__init__()
        mid_channels = 4096
        self.fc1 = VGGDense(in_channels=in_channels, out_channels=mid_channels)
        self.fc2 = VGGDense(in_channels=mid_channels, out_channels=mid_channels)
        self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
    def forward(self, x):
        return self.fc3(self.fc2(self.fc1(x)))
class VGG(nn.Module):
    """
    VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 bias=True,
                 use_bn=False,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(VGG, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("stageN", "unitN", "poolN") determine
        # state_dict keys; pretrained weights rely on exactly this layout.
        self.features = nn.Sequential()
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                stage.add_module("unit{}".format(j + 1), conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bias=bias,
                    use_bn=use_bn))
                in_channels = out_channels
            # Halve the spatial resolution at the end of every stage.
            stage.add_module("pool{}".format(i + 1), nn.MaxPool2d(
                kernel_size=2,
                stride=2,
                padding=0))
            self.features.add_module("stage{}".format(i + 1), stage)
        # The head expects 7x7 feature maps (224 input / 2^5 poolings).
        self.output = VGGOutputBlock(
            in_channels=(in_channels * 7 * 7),
            classes=num_classes)
        self._init_params()
    def _init_params(self):
        """Kaiming-uniform init for all convolution weights; zero all biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        """Extract features, flatten, and classify to (batch, num_classes)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_vgg(blocks,
            bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".torch", "models"),
            **kwargs):
    """
    Create VGG model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Units per stage for each supported depth.
    depth_to_layers = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = VGG(
        channels=channels,
        bias=bias,
        use_bn=use_bn,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def vgg11(**kwargs):
    """
    VGG-11 model ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="vgg11", blocks=11, **kwargs)
def vgg13(**kwargs):
    """
    VGG-13 model ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="vgg13", blocks=13, **kwargs)
def vgg16(**kwargs):
    """
    VGG-16 model ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="vgg16", blocks=16, **kwargs)
def vgg19(**kwargs):
    """
    VGG-19 model ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="vgg19", blocks=19, **kwargs)
def bn_vgg11(**kwargs):
    """
    VGG-11 with batch normalization and bias-free convolutions ('Very Deep Convolutional Networks for Large-Scale
    Image Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg11", blocks=11, bias=False, use_bn=True, **kwargs)
def bn_vgg13(**kwargs):
    """
    VGG-13 with batch normalization and bias-free convolutions ('Very Deep Convolutional Networks for Large-Scale
    Image Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg13", blocks=13, bias=False, use_bn=True, **kwargs)
def bn_vgg16(**kwargs):
    """
    VGG-16 with batch normalization and bias-free convolutions ('Very Deep Convolutional Networks for Large-Scale
    Image Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg16", blocks=16, bias=False, use_bn=True, **kwargs)
def bn_vgg19(**kwargs):
    """
    VGG-19 with batch normalization and bias-free convolutions ('Very Deep Convolutional Networks for Large-Scale
    Image Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg19", blocks=19, bias=False, use_bn=True, **kwargs)
def bn_vgg11b(**kwargs):
    """
    VGG-11 with batch normalization and biased convolutions ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg11b", blocks=11, bias=True, use_bn=True, **kwargs)
def bn_vgg13b(**kwargs):
    """
    VGG-13 with batch normalization and biased convolutions ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg13b", blocks=13, bias=True, use_bn=True, **kwargs)
def bn_vgg16b(**kwargs):
    """
    VGG-16 with batch normalization and biased convolutions ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg16b", blocks=16, bias=True, use_bn=True, **kwargs)
def bn_vgg19b(**kwargs):
    """
    VGG-19 with batch normalization and biased convolutions ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_vgg(model_name="bn_vgg19b", blocks=19, bias=True, use_bn=True, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all VGG builders: parameter counts, forward shape, and one
    backward pass."""
    import torch
    pretrained = False
    # builder -> expected trainable-parameter count
    expected_widths = {
        vgg11: 132863336,
        vgg13: 133047848,
        vgg16: 138357544,
        vgg19: 143667240,
        bn_vgg11: 132866088,
        bn_vgg13: 133050792,
        bn_vgg16: 138361768,
        bn_vgg19: 143672744,
        bn_vgg11b: 132868840,
        bn_vgg13b: 133053736,
        bn_vgg16b: 138365992,
        bn_vgg19b: 143678248,
    }
    for model_fn, expected in expected_widths.items():
        net = model_fn(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == expected)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 13,528
| 29.678005
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resnet_cub.py
|
"""
ResNet for CUB-200-2011, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .resnet import get_resnet
def resnet10_cub(num_classes=200, **kwargs):
    """
    ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=10, model_name="resnet10_cub", **kwargs)
def resnet12_cub(num_classes=200, **kwargs):
    """
    ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=12, model_name="resnet12_cub", **kwargs)
def resnet14_cub(num_classes=200, **kwargs):
    """
    ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=14, model_name="resnet14_cub", **kwargs)
def resnetbc14b_cub(num_classes=200, **kwargs):
    """
    ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=14, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc14b_cub", **kwargs)
def resnet16_cub(num_classes=200, **kwargs):
    """
    ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=16, model_name="resnet16_cub", **kwargs)
def resnet18_cub(num_classes=200, **kwargs):
    """
    ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=18, model_name="resnet18_cub", **kwargs)
def resnet26_cub(num_classes=200, **kwargs):
    """
    ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs)
def resnetbc26b_cub(num_classes=200, **kwargs):
    """
    ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=26, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc26b_cub", **kwargs)
def resnet34_cub(num_classes=200, **kwargs):
    """
    ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=34, model_name="resnet34_cub", **kwargs)
def resnetbc38b_cub(num_classes=200, **kwargs):
    """
    ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=38, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc38b_cub", **kwargs)
def resnet50_cub(num_classes=200, **kwargs):
    """
    ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=50, model_name="resnet50_cub", **kwargs)
def resnet50b_cub(num_classes=200, **kwargs):
    """
    ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs)
def resnet101_cub(num_classes=200, **kwargs):
    """
    ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=101, model_name="resnet101_cub", **kwargs)
def resnet101b_cub(num_classes=200, **kwargs):
    """
    ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs)
def resnet152_cub(num_classes=200, **kwargs):
    """
    ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=152, model_name="resnet152_cub", **kwargs)
def resnet152b_cub(num_classes=200, **kwargs):
    """
    ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs)
def resnet200_cub(num_classes=200, **kwargs):
    """
    ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=200, model_name="resnet200_cub", **kwargs)
def resnet200b_cub(num_classes=200, **kwargs):
    """
    ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(num_classes=num_classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every CUB ResNet variant: parameter count and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_widths = {
        resnet10_cub: 5008392,
        resnet12_cub: 5082376,
        resnet14_cub: 5377800,
        resnetbc14b_cub: 8425736,
        resnet16_cub: 6558472,
        resnet18_cub: 11279112,
        resnet26_cub: 17549832,
        resnetbc26b_cub: 14355976,
        resnet34_cub: 21387272,
        resnetbc38b_cub: 20286216,
        resnet50_cub: 23917832,
        resnet50b_cub: 23917832,
        resnet101_cub: 42909960,
        resnet101b_cub: 42909960,
        resnet152_cub: 58553608,
        resnet152b_cub: 58553608,
        resnet200_cub: 63034632,
        resnet200b_cub: 63034632,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_width

        # One forward/backward pass on a dummy ImageNet-sized batch.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 200)
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,148
| 35.094388
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/bagnet.py
|
"""
BagNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
"""
__all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock
class BagNetBottleneck(nn.Module):
    """
    Bottleneck residual branch of a BagNet unit: 1x1 -> KxK (no padding) -> 1x1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second convolution.
    stride : int or tuple/list of 2 int
        Strides of the second convolution.
    bottleneck_factor : int, default 4
        Channel-reduction factor for the middle convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 bottleneck_factor=4):
        super(BagNetBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor

        # NOTE: attribute names are fixed — they define state_dict keys.
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        # padding=0 ("valid" convolution) is the defining BagNet trait: it
        # shrinks the feature map instead of padding it.
        self.conv2 = ConvBlock(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=0)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class BagNetUnit(nn.Module):
    """
    BagNet residual unit: bottleneck body plus a (possibly projected) identity
    shortcut, cropped to match the body's unpadded convolution output.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second body convolution.
    stride : int or tuple/list of 2 int
        Strides of the second body convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride):
        super(BagNetUnit, self).__init__()
        # A 1x1 projection is needed whenever the shortcut cannot be a plain
        # identity (channel count or spatial size changes).
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = BagNetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        x = self.body(x)
        if x.size(-1) != identity.size(-1):
            # The body's valid (padding=0) convolution shrinks the feature map;
            # crop the shortcut to the same spatial size before the addition.
            diff = identity.size(-1) - x.size(-1)
            identity = identity[:, :, :-diff, :-diff]
        return self.activ(x + identity)
class BagNetInitBlock(nn.Module):
    """
    BagNet stem: a 1x1 convolution followed by an unpadded 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(BagNetInitBlock, self).__init__()
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels)
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            padding=0)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class BagNet(nn.Module):
    """
    BagNet model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_pool_size : int
        Size of the pooling windows for final pool.
    normal_kernel_sizes : list of int
        Count of the first units with 3x3 convolution window size for each stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_pool_size,
                 normal_kernel_sizes,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(BagNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stageN", "unitN", ...) are the
        # state_dict keys; changing them would break pretrained-weight loading.
        self.features = nn.Sequential()
        self.features.add_module("init_block", BagNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the last.
                stride = 2 if (j == 0) and (i != len(channels) - 1) else 1
                # Only the first `normal_kernel_sizes[i]` units of a stage use
                # 3x3 kernels; the remainder degrade to 1x1 (limits the
                # receptive field, which is the point of BagNet).
                kernel_size = 3 if j < normal_kernel_sizes[i] else 1
                stage.add_module("unit{}".format(j + 1), BagNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    stride=stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=final_pool_size,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the (N, C, 1, 1) pooled features for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_bagnet(field,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create BagNet model with specific parameters.

    Parameters:
    ----------
    field : int
        BagNet variant selector (9, 17 or 33; presumably the receptive-field
        size — matches the paper's naming).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # field -> (per-stage count of 3x3 units, final average-pool window).
    configs = {
        9: ([1, 1, 0, 0], 27),
        17: ([1, 1, 1, 0], 26),
        33: ([1, 1, 1, 1], 24),
    }
    if field not in configs:
        raise ValueError("Unsupported BagNet with field: {}".format(field))
    normal_kernel_sizes, final_pool_size = configs[field]

    layers = [3, 4, 6, 3]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = BagNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_pool_size=final_pool_size,
        normal_kernel_sizes=normal_kernel_sizes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def bagnet9(**kwargs):
    """
    Build BagNet-9.

    From 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(field=9, model_name="bagnet9", **kwargs)
def bagnet17(**kwargs):
    """
    Build BagNet-17.

    From 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(field=17, model_name="bagnet17", **kwargs)
def bagnet33(**kwargs):
    """
    Build BagNet-33.

    From 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(field=33, model_name="bagnet33", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the BagNet variants: parameter count and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_widths = {
        bagnet9: 15688744,
        bagnet17: 16213032,
        bagnet33: 18310184,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_width

        # One forward/backward pass on a dummy ImageNet-sized batch.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 10,903
| 29.373259
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/airnet.py
|
"""
AirNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock']
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
class AirBlock(nn.Module):
    """
    AirNet attention block: produces a sigmoid attention map at the input's
    resolution via a downsample -> convolve -> upsample bottleneck.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int, default 1
        Number of groups.
    ratio: int, default 2
        Air compression ratio.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups=1,
                 ratio=2):
        super(AirBlock, self).__init__()
        assert (out_channels % ratio == 0)
        mid_channels = out_channels // ratio

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            groups=groups)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        w = self.conv1(x)
        w = self.pool(w)
        w = self.conv2(w)
        # Undo the stride-2 pooling so the map matches the input resolution.
        w = F.interpolate(
            w,
            scale_factor=2,
            mode="bilinear",
            align_corners=True)
        w = self.conv3(w)
        return self.sigmoid(w)
class AirBottleneck(nn.Module):
    """
    AirNet bottleneck: 1x1 -> 3x3 -> 1x1 residual branch, optionally gated by
    an attention map from an AirBlock.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    ratio: int
        Air compression ratio.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 ratio):
        super(AirBottleneck, self).__init__()
        mid_channels = out_channels // 4
        # Attention is only used on non-downsampling units below 512 channels.
        self.use_air_block = (stride == 1 and mid_channels < 512)

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)
        if self.use_air_block:
            self.air = AirBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                ratio=ratio)

    def forward(self, x):
        # The attention map is computed from the unit's *input*.
        att = self.air(x) if self.use_air_block else None
        x = self.conv2(self.conv1(x))
        if att is not None:
            x = x * att
        return self.conv3(x)
class AirUnit(nn.Module):
    """
    AirNet residual unit: AirBottleneck body plus a (possibly projected)
    identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    ratio: int
        Air compression ratio.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 ratio):
        super(AirUnit, self).__init__()
        # A 1x1 projection is needed whenever the shortcut cannot be identity.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = AirBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            ratio=ratio)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class AirInitBlock(nn.Module):
    """
    AirNet stem: three 3x3 convolutions (the first with stride 2) followed by
    a stride-2 max pool, for a total 4x spatial reduction.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(AirInitBlock, self).__init__()
        mid_channels = out_channels // 2

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.conv3(self.conv2(self.conv1(x))))
class AirNet(nn.Module):
    """
    AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    ratio: int
        Air compression ratio.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 ratio,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(AirNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stageN", "unitN", ...) are the
        # state_dict keys; changing them would break pretrained-weight loading.
        self.features = nn.Sequential()
        self.features.add_module("init_block", AirInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), AirUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    ratio=ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the (N, C, 1, 1) pooled features for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_airnet(blocks,
               base_channels,
               ratio,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create AirNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Supported depths and their per-stage unit counts.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Channel width doubles every stage and carries the bottleneck expansion.
    channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = AirNet(
        channels=channels,
        init_block_channels=init_block_channels,
        ratio=ratio,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def airnet50_1x64d_r2(**kwargs):
    """
    Build AirNet50-1x64d (r=2).

    From 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_airnet(blocks=50, base_channels=64, ratio=2, model_name="airnet50_1x64d_r2", **kwargs)
def airnet50_1x64d_r16(**kwargs):
    """
    Build AirNet50-1x64d (r=16).

    From 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_airnet(blocks=50, base_channels=64, ratio=16, model_name="airnet50_1x64d_r16", **kwargs)
def airnet101_1x64d_r2(**kwargs):
    """
    Build AirNet101-1x64d (r=2).

    From 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_airnet(blocks=101, base_channels=64, ratio=2, model_name="airnet101_1x64d_r2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the AirNet variants: parameter count and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_widths = {
        airnet50_1x64d_r2: 27425864,
        airnet50_1x64d_r16: 25714952,
        airnet101_1x64d_r2: 51727432,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_width

        # One forward/backward pass on a dummy ImageNet-sized batch.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,525
| 28.612293
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/mnasnet.py
|
"""
MnasNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
class DwsExpSEResUnit(nn.Module):
    """
    Depthwise-separable expanded residual unit with optional SE gate
    (the MnasNet building block): [1x1 expand] -> dw 3x3/5x5 -> [SE] -> 1x1
    project, with an identity skip when shapes allow.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the second convolution layer.
    use_kernel3 : bool, default True
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int, default 1
        Expansion factor for each unit.
    se_factor : int, default 0
        SE reduction factor for each unit (0 disables SE).
    use_skip : bool, default True
        Whether to use skip connection.
    activation : str, default 'relu'
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 use_kernel3=True,
                 exp_factor=1,
                 se_factor=0,
                 use_skip=True,
                 activation="relu"):
        super(DwsExpSEResUnit, self).__init__()
        assert (exp_factor >= 1)
        # Residual only when the shapes match and skipping is requested.
        self.residual = (in_channels == out_channels) and (stride == 1) and use_skip
        self.use_exp_conv = exp_factor > 1
        self.use_se = se_factor > 0
        mid_channels = exp_factor * in_channels
        dw_block = dwconv3x3_block if use_kernel3 else dwconv5x5_block

        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=activation)
        self.dw_conv = dw_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                round_mid=False,
                mid_activation=activation)
        self.pw_conv = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        out = self.exp_conv(x) if self.use_exp_conv else x
        out = self.dw_conv(out)
        if self.use_se:
            out = self.se(out)
        out = self.pw_conv(out)
        if identity is not None:
            out = out + identity
        return out
class MnasInitBlock(nn.Module):
    """
    MnasNet stem: a stride-2 3x3 convolution followed by one depthwise
    separable unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip):
        super(MnasInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = DwsExpSEResUnit(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_skip=use_skip)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class MnasFinalBlock(nn.Module):
    """
    MnasNet head of the feature extractor: a 6x-expanded depthwise separable
    unit followed by a 1x1 widening convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip):
        super(MnasFinalBlock, self).__init__()
        self.conv1 = DwsExpSEResUnit(
            in_channels=in_channels,
            out_channels=mid_channels,
            exp_factor=6,
            use_skip=use_skip)
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class MnasNet(nn.Module):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 se_factors,
                 init_block_use_skip,
                 final_block_use_skip,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MnasNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stageN", "unitN", ...) are the
        # state_dict keys; changing them would break pretrained-weight loading.
        self.features = nn.Sequential()
        # init_block_channels is [mid, out] for the two-conv stem.
        self.features.add_module("init_block", MnasInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels[1],
            mid_channels=init_block_channels[0],
            use_skip=init_block_use_skip))
        in_channels = init_block_channels[1]
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage.
                stride = 2 if (j == 0) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                se_factor = se_factors[i][j]
                stage.add_module("unit{}".format(j + 1), DwsExpSEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    se_factor=se_factor))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # final_block_channels is [mid, out] for the feature-extractor head.
        self.features.add_module("final_block", MnasFinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels[1],
            mid_channels=final_block_channels[0],
            use_skip=final_block_use_skip))
        in_channels = final_block_channels[1]
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the (N, C, 1, 1) pooled features for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_mnasnet(version,
                width_scale,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create MnasNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MnasNet ('b1', 'a1' or 'small').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if version == "b1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
        kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
        exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
        se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
        init_block_use_skip = False
        final_block_use_skip = False
    elif version == "a1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
        init_block_use_skip = False
        final_block_use_skip = True
    elif version == "small":
        init_block_channels = [8, 8]
        final_block_channels = [144, 1280]
        channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
        init_block_use_skip = True
        final_block_use_skip = True
    else:
        raise ValueError("Unsupported MnasNet version {}".format(version))

    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        # BUGFIX: `init_block_channels` is a list of two ints; the previous
        # code multiplied the whole list by a float, which raises a TypeError
        # whenever width_scale != 1.0. Scale each entry instead.
        init_block_channels = [round_channels(cij * width_scale) for cij in init_block_channels]
        # NOTE(review): `final_block_channels` is deliberately left unscaled,
        # matching the original code's behavior for width_scale == 1.0 — the
        # 1280-wide head is commonly kept fixed; confirm before scaling it.

    net = MnasNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        se_factors=se_factors,
        init_block_use_skip=init_block_use_skip,
        final_block_use_skip=final_block_use_skip,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def mnasnet_b1(**kwargs):
    """
    Build MnasNet-B1.

    From 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs)
def mnasnet_a1(**kwargs):
    """
    Build MnasNet-A1.

    From 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs)
def mnasnet_small(**kwargs):
    """
    Build MnasNet-Small.

    From 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the MnasNet variants: parameter count and output shape."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_widths = {
        mnasnet_b1: 4383312,
        mnasnet_a1: 3887038,
        mnasnet_small: 2030264,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_width

        # One forward/backward pass on a dummy ImageNet-sized batch.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,189
| 32.388235
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/pyramidnet_cifar.py
|
"""
PyramidNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn',
'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn',
'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn',
'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn',
'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(nn.Module):
    """
    PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARPyramidNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        # Stem: plain 3x3 convolution without activation, then pyramid stages.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=None))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), PyrUnit(
                    in_channels=in_channels,
                    out_channels=unit_channels,
                    stride=unit_stride,
                    bottleneck=bottleneck))
                in_channels = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(kernel_size=8, stride=1))

        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # He-uniform initialization for convolution weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        return self.output(x.view(x.size(0), -1))
def get_pyramidnet_cifar(num_classes,
                         blocks,
                         alpha,
                         bottleneck,
                         model_name=None,
                         pretrained=False,
                         root=os.path.join("~", ".torch", "models"),
                         **kwargs):
    """
    Create PyramidNet for CIFAR model with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])

    # Three stages; total depth determines the number of units per stage.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        units_per_stage = (blocks - 2) // 9
    else:
        assert ((blocks - 2) % 6 == 0)
        units_per_stage = (blocks - 2) // 6
    layers = [units_per_stage] * 3

    init_block_channels = 16
    growth_add = float(alpha) / float(sum(layers))

    # Channel widths grow linearly by `growth_add` per unit, each stage
    # continuing from the last width of the previous one.
    channels = []
    last_channels = init_block_channels
    for layer_count in layers:
        stage_channels = [(i + 1) * growth_add + last_channels for i in range(layer_count)]
        channels.append(stage_channels)
        last_channels = stage_channels[-1]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARPyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def pyramidnet110_a48_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_cifar10", **kwargs)


def pyramidnet110_a48_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_cifar100", **kwargs)


def pyramidnet110_a48_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=48) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_svhn", **kwargs)
def pyramidnet110_a84_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar10", **kwargs)


def pyramidnet110_a84_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar100", **kwargs)


def pyramidnet110_a84_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=84) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_svhn", **kwargs)
def pyramidnet110_a270_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar10", **kwargs)


def pyramidnet110_a270_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar100", **kwargs)


def pyramidnet110_a270_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-110 (a=270) model for SVHN from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_svhn", **kwargs)
def pyramidnet164_a270_bn_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar10", **kwargs)


def pyramidnet164_a270_bn_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar100", **kwargs)


def pyramidnet164_a270_bn_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-164 (a=270, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_svhn", **kwargs)
def pyramidnet200_a240_bn_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar10", **kwargs)


def pyramidnet200_a240_bn_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar100", **kwargs)


def pyramidnet200_a240_bn_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_svhn", **kwargs)
def pyramidnet236_a220_bn_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar10", **kwargs)


def pyramidnet236_a220_bn_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar100", **kwargs)


def pyramidnet236_a220_bn_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-236 (a=220, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_svhn", **kwargs)
def pyramidnet272_a200_bn_cifar10(num_classes=10, **kwargs):
    """
    PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar10", **kwargs)


def pyramidnet272_a200_bn_cifar100(num_classes=100, **kwargs):
    """
    PyramidNet-272 (a=200, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar100", **kwargs)


def pyramidnet272_a200_bn_svhn(num_classes=10, **kwargs):
    """
    PyramidNet-272 (a=200, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_svhn", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every CIFAR/SVHN PyramidNet: parameter count and output shape."""
    import torch

    pretrained = False

    # (factory, number of classes, expected trainable-parameter count)
    model_list = [
        (pyramidnet110_a48_cifar10, 10, 1772706),
        (pyramidnet110_a48_cifar100, 100, 1778556),
        (pyramidnet110_a48_svhn, 10, 1772706),
        (pyramidnet110_a84_cifar10, 10, 3904446),
        (pyramidnet110_a84_cifar100, 100, 3913536),
        (pyramidnet110_a84_svhn, 10, 3904446),
        (pyramidnet110_a270_cifar10, 10, 28485477),
        (pyramidnet110_a270_cifar100, 100, 28511307),
        (pyramidnet110_a270_svhn, 10, 28485477),
        (pyramidnet164_a270_bn_cifar10, 10, 27216021),
        (pyramidnet164_a270_bn_cifar100, 100, 27319071),
        (pyramidnet164_a270_bn_svhn, 10, 27216021),
        (pyramidnet200_a240_bn_cifar10, 10, 26752702),
        (pyramidnet200_a240_bn_cifar100, 100, 26844952),
        (pyramidnet200_a240_bn_svhn, 10, 26752702),
        (pyramidnet236_a220_bn_cifar10, 10, 26969046),
        (pyramidnet236_a220_bn_cifar100, 100, 27054096),
        (pyramidnet236_a220_bn_svhn, 10, 26969046),
        (pyramidnet272_a200_bn_cifar10, 10, 26210842),
        (pyramidnet272_a200_bn_cifar100, 100, 26288692),
        (pyramidnet272_a200_bn_svhn, 10, 26210842),
    ]

    for model, num_classes, expected_width in model_list:
        net = model(pretrained=pretrained, num_classes=num_classes)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_width

        # One forward/backward pass on a single 32x32 RGB image.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, num_classes)


if __name__ == "__main__":
    _test()
| 23,823
| 32.413745
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/preresnet_cifar.py
|
"""
PreResNet for CIFAR/SVHN, implemented in PyTorch.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn',
'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn',
'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn',
'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn',
'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn',
'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn',
'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn',
'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARPreResNet(nn.Module):
    """
    PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARPreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        # Stem: a bare 3x3 convolution (pre-activation units carry their own BN/ReLU).
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels):
                # First unit of every non-initial stage halves the resolution.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), PreResUnit(
                    in_channels=in_channels,
                    out_channels=unit_channels,
                    stride=unit_stride,
                    bottleneck=bottleneck,
                    conv1_stride=False))
                in_channels = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(kernel_size=8, stride=1))

        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # He-uniform initialization for every convolution; biases (if any) start at zero.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        return self.output(x.view(x.size(0), -1))
def get_preresnet_cifar(num_classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        root=os.path.join("~", ".torch", "models"),
                        **kwargs):
    """
    Create PreResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])

    # Three stages; total depth determines the number of units per stage.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        units_per_stage = (blocks - 2) // 9
    else:
        assert ((blocks - 2) % 6 == 0)
        units_per_stage = (blocks - 2) // 6
    layers = [units_per_stage] * 3

    init_block_channels = 16
    channels_per_layers = [16, 32, 64]

    # Constant width within each stage; bottleneck units expand it 4x.
    width_factor = 4 if bottleneck else 1
    channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = CIFARPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def preresnet20_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar10",
        **kwargs)


def preresnet20_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar100",
        **kwargs)


def preresnet20_svhn(num_classes=10, **kwargs):
    """
    PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_svhn",
        **kwargs)
def preresnet56_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar10",
        **kwargs)


def preresnet56_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar100",
        **kwargs)


def preresnet56_svhn(num_classes=10, **kwargs):
    """
    PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_svhn",
        **kwargs)
def preresnet110_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar10",
        **kwargs)


def preresnet110_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar100",
        **kwargs)


def preresnet110_svhn(num_classes=10, **kwargs):
    """
    PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_svhn",
        **kwargs)
def preresnet164bn_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar10",
        **kwargs)


def preresnet164bn_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar100",
        **kwargs)


def preresnet164bn_svhn(num_classes=10, **kwargs):
    """
    PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_svhn",
        **kwargs)
def preresnet272bn_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-272(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar10",
        **kwargs)


def preresnet272bn_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-272(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar100",
        **kwargs)


def preresnet272bn_svhn(num_classes=10, **kwargs):
    """
    PreResNet-272(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        num_classes=num_classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_svhn",
        **kwargs)
def preresnet542bn_cifar10(num_classes=10, **kwargs):
"""
PreResNet-542(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True,
model_name="preresnet542bn_cifar10", **kwargs)
def preresnet542bn_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-542(BN) model for CIFAR-100 from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=542, bottleneck=True, model_name="preresnet542bn_cifar100")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet542bn_svhn(num_classes=10, **kwargs):
    """
    PreResNet-542(BN) model for SVHN from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=542, bottleneck=True, model_name="preresnet542bn_svhn")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1001_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1001, bottleneck=True, model_name="preresnet1001_cifar10")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1001_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1001, bottleneck=True, model_name="preresnet1001_cifar100")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1001_svhn(num_classes=10, **kwargs):
    """
    PreResNet-1001 model for SVHN from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1001, bottleneck=True, model_name="preresnet1001_svhn")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1202_cifar10(num_classes=10, **kwargs):
    """
    PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1202, bottleneck=False, model_name="preresnet1202_cifar10")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1202_cifar100(num_classes=100, **kwargs):
    """
    PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1202, bottleneck=False, model_name="preresnet1202_cifar100")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def preresnet1202_svhn(num_classes=10, **kwargs):
    """
    PreResNet-1202 model for SVHN from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=1202, bottleneck=False, model_name="preresnet1202_svhn")
    return get_preresnet_cifar(num_classes=num_classes, **config, **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable scalar weights.
    """
    # Tensor.numel() equals the product of the tensor's dimensions and returns
    # a plain Python int, so there is no need for numpy here (the original
    # np.prod version returned np.int64).
    return sum(param.numel() for param in net.parameters() if param.requires_grad)
def _test():
    """
    Smoke-test every CIFAR/SVHN PreResNet variant defined in this module:
    instantiate each model, check its trainable-parameter count against the
    known total, then run one forward/backward pass on a CIFAR-sized input.
    """
    import torch
    pretrained = False
    # (constructor, expected number of classes) for each exported variant.
    models = [
        (preresnet20_cifar10, 10),
        (preresnet20_cifar100, 100),
        (preresnet20_svhn, 10),
        (preresnet56_cifar10, 10),
        (preresnet56_cifar100, 100),
        (preresnet56_svhn, 10),
        (preresnet110_cifar10, 10),
        (preresnet110_cifar100, 100),
        (preresnet110_svhn, 10),
        (preresnet164bn_cifar10, 10),
        (preresnet164bn_cifar100, 100),
        (preresnet164bn_svhn, 10),
        (preresnet272bn_cifar10, 10),
        (preresnet272bn_cifar100, 100),
        (preresnet272bn_svhn, 10),
        (preresnet542bn_cifar10, 10),
        (preresnet542bn_cifar100, 100),
        (preresnet542bn_svhn, 10),
        (preresnet1001_cifar10, 10),
        (preresnet1001_cifar100, 100),
        (preresnet1001_svhn, 10),
        (preresnet1202_cifar10, 10),
        (preresnet1202_cifar100, 100),
        (preresnet1202_svhn, 10),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter totals; CIFAR-10 and SVHN variants share
        # a count (same 10-way classifier), CIFAR-100 differs by the head size.
        assert (model != preresnet20_cifar10 or weight_count == 272282)
        assert (model != preresnet20_cifar100 or weight_count == 278132)
        assert (model != preresnet20_svhn or weight_count == 272282)
        assert (model != preresnet56_cifar10 or weight_count == 855578)
        assert (model != preresnet56_cifar100 or weight_count == 861428)
        assert (model != preresnet56_svhn or weight_count == 855578)
        assert (model != preresnet110_cifar10 or weight_count == 1730522)
        assert (model != preresnet110_cifar100 or weight_count == 1736372)
        assert (model != preresnet110_svhn or weight_count == 1730522)
        assert (model != preresnet164bn_cifar10 or weight_count == 1703258)
        assert (model != preresnet164bn_cifar100 or weight_count == 1726388)
        assert (model != preresnet164bn_svhn or weight_count == 1703258)
        assert (model != preresnet272bn_cifar10 or weight_count == 2816090)
        assert (model != preresnet272bn_cifar100 or weight_count == 2839220)
        assert (model != preresnet272bn_svhn or weight_count == 2816090)
        assert (model != preresnet542bn_cifar10 or weight_count == 5598170)
        assert (model != preresnet542bn_cifar100 or weight_count == 5621300)
        assert (model != preresnet542bn_svhn or weight_count == 5598170)
        assert (model != preresnet1001_cifar10 or weight_count == 10327706)
        assert (model != preresnet1001_cifar100 or weight_count == 10350836)
        assert (model != preresnet1001_svhn or weight_count == 10327706)
        assert (model != preresnet1202_cifar10 or weight_count == 19423834)
        assert (model != preresnet1202_cifar100 or weight_count == 19429684)
        assert (model != preresnet1202_svhn or weight_count == 19423834)
        # One forward/backward pass on a 32x32 RGB input to verify the output
        # shape and that gradients flow end to end.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
if __name__ == "__main__":
    _test()
| 24,611
| 35.789238
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/alphapose_coco.py
|
"""
AlphaPose for COCO Keypoint, implemented in PyTorch.
Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
"""
__all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco']
import os
import torch
import torch.nn as nn
from .common import conv3x3, DucBlock, HeatmapMaxDetBlock
from .fastseresnet import fastseresnet101b
class AlphaPose(nn.Module):
    """
    AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17):
        super(AlphaPose, self).__init__()
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.backbone = backbone
        # Decoder: pixel shuffle (x2 upsampling, channels / 4), then a chain of
        # DUC blocks (each built with scale_factor=2), then a 3x3 conv that
        # emits one heatmap channel per keypoint.
        self.decoder = nn.Sequential()
        self.decoder.add_module("init_block", nn.PixelShuffle(upscale_factor=2))
        # PixelShuffle(2) rearranges (C, H, W) -> (C/4, 2H, 2W).
        in_channels = backbone_out_channels // 4
        for i, out_channels in enumerate(channels):
            self.decoder.add_module("unit{}".format(i + 1), DucBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                scale_factor=2))
            in_channels = out_channels
        self.decoder.add_module("final_block", conv3x3(
            in_channels=in_channels,
            out_channels=keypoints,
            bias=True))
        self.heatmap_max_det = HeatmapMaxDetBlock()
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero for conv biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.backbone(x)
        heatmap = self.decoder(x)
        if self.return_heatmap:
            # Raw per-keypoint heatmaps (e.g. for training with a heatmap loss).
            return heatmap
        else:
            # Decode heatmap maxima into keypoint coordinates/scores.
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_alphapose(backbone,
                  backbone_out_channels,
                  keypoints,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create an AlphaPose model with the given backbone and keypoint count.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed decoder widths for the two DUC stages.
    decoder_channels = [256, 128]
    net = AlphaPose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        keypoints=keypoints,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    AlphaPose model on the base of Fast-SE-ResNet-101b for COCO Keypoint from 'RMPE: Regional Multi-person Pose
    Estimation,' https://arxiv.org/abs/1612.00137.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = fastseresnet101b(pretrained=pretrained_backbone).features
    # Drop the trailing module of the feature extractor (presumably the final
    # pooling block, so spatial resolution is preserved for the decoder) --
    # verify against fastseresnet.
    del backbone[-1]
    return get_alphapose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,
                         model_name="alphapose_fastseresnet101b_coco", **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable scalar weights.
    """
    # Tensor.numel() equals the product of the tensor's dimensions and returns
    # a plain Python int, so there is no need for numpy here (the original
    # np.prod version returned np.int64).
    return sum(param.numel() for param in net.parameters() if param.requires_grad)
def _test():
    """
    Smoke-test the AlphaPose model: instantiate, check the trainable-parameter
    count, and run one forward pass on a batch of COCO-sized inputs.
    """
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False
    models = [
        alphapose_fastseresnet101b_coco,
    ]
    for model in models:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != alphapose_fastseresnet101b_coco or weight_count == 59569873)
        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
        if return_heatmap:
            # Heatmap branch: per-keypoint maps at 1/4 of the input resolution.
            assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
        else:
            # Keypoint branch: 3 values per keypoint -- presumably (x, y, score);
            # see HeatmapMaxDetBlock for the exact layout.
            assert (y.shape[2] == 3)
if __name__ == "__main__":
    _test()
| 6,247
| 30.877551
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/pyramidnet.py
|
"""
PyramidNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit']
import os
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResActivation
class PyrBlock(nn.Module):
    """
    Simple PyramidNet block for the residual path in a PyramidNet unit:
    two pre-activated 3x3 convolutions, where the first carries the stride
    and is built without an activation (activate=False).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self, in_channels, out_channels, stride):
        super(PyrBlock, self).__init__()
        self.conv1 = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            activate=False)
        self.conv2 = pre_conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels)

    def forward(self, x):
        # Two-stage residual branch expressed as a single pipeline.
        return self.conv2(self.conv1(x))
class PyrBottleneck(nn.Module):
    """
    PyramidNet bottleneck block for the residual path in a PyramidNet unit:
    a 1x1 reduce (no activation), a strided 3x3, and a 1x1 expand, all
    pre-activated. The internal width is out_channels // 4.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self, in_channels, out_channels, stride):
        super(PyrBottleneck, self).__init__()
        mid_channels = out_channels // 4
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            activate=False)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        # reduce -> spatial conv -> expand, as a single pipeline.
        return self.conv3(self.conv2(self.conv1(x)))
class PyrUnit(nn.Module):
    """
    PyramidNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck):
        super(PyrUnit, self).__init__()
        # PyramidNet widens monotonically, so the output is never narrower.
        assert (out_channels >= in_channels)
        # The identity path needs spatial resizing only when the unit strides.
        self.resize_identity = (stride != 1)
        # F.pad widths (last dim first): zero-pad the identity's channel dim
        # up to out_channels so it can be added to the (wider) body output.
        self.identity_pad_width = (0, 0, 0, 0, 0, out_channels - in_channels)
        if bottleneck:
            self.body = PyrBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        else:
            self.body = PyrBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        # Extra BN after the body (PyramidNet-specific placement).
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        if self.resize_identity:
            # Parameter-free downsampling of the identity; ceil_mode keeps the
            # output size matching the strided convolution's.
            self.identity_pool = nn.AvgPool2d(
                kernel_size=2,
                stride=stride,
                ceil_mode=True)
    def forward(self, x):
        identity = x
        x = self.body(x)
        x = self.bn(x)
        if self.resize_identity:
            identity = self.identity_pool(identity)
        # Match channel counts by zero-padding, then add the shortcut.
        identity = F.pad(identity, pad=self.identity_pad_width)
        x = x + identity
        return x
class PyrInitBlock(nn.Module):
    """
    PyramidNet specific initial block: a 7x7 stride-2 convolution followed by
    batch normalization, ReLU and a stride-2 max pooling (4x downsampling
    overall).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self, in_channels, out_channels):
        super(PyrInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=7, stride=2, padding=3, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # conv -> bn -> relu -> pool as one pipeline.
        return self.pool(self.activ(self.bn(self.conv(x))))
class PyramidNet(nn.Module):
    """
    PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(PyramidNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", PyrInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample (stride 2) only at the first unit of stages 2+.
                stride = 1 if (i == 0) or (j != 0) else 2
                stage.add_module("unit{}".format(j + 1), PyrUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Final BN+ReLU (pre-activation networks end without an activation).
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # 7x7 average pool assumes a 7x7 final feature map (224x224 input).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for conv weights; zero for conv biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled features before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_pyramidnet(blocks,
                   alpha,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create PyramidNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value (total channel growth over all units).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    PyramidNet
        The constructed network.
    """
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14:
        layers = [2, 2, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        # Was previously mislabelled as "Unsupported ResNet".
        raise ValueError("Unsupported PyramidNet with number of blocks: {}".format(blocks))

    init_block_channels = 64

    # Channel widths grow linearly by `growth_add` per unit so that the total
    # growth over the whole network equals alpha (the paper's additive widening).
    growth_add = float(alpha) / float(sum(layers))
    channels = []
    last_channels = init_block_channels
    for layer_count in layers:
        stage_channels = [(i + 1) * growth_add + last_channels for i in range(layer_count)]
        channels.append(stage_channels)
        last_channels = stage_channels[-1]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]

    if blocks < 50:
        bottleneck = False
    else:
        bottleneck = True
        # Bottleneck units expand the unit's output width by a factor of 4
        # (applied after rounding, matching the reference implementation).
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = PyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def pyramidnet101_a360(**kwargs):
    """
    PyramidNet-101 (alpha=360) model from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=101, alpha=360, model_name="pyramidnet101_a360")
    return get_pyramidnet(**config, **kwargs)
def _calc_width(net):
    """
    Count the trainable parameters of a network.

    Parameters:
    ----------
    net : nn.Module
        Network to inspect.

    Returns:
    -------
    int
        Total number of trainable scalar weights.
    """
    # Tensor.numel() equals the product of the tensor's dimensions and returns
    # a plain Python int, so there is no need for numpy here (the original
    # np.prod version returned np.int64).
    return sum(param.numel() for param in net.parameters() if param.requires_grad)
def _test():
    """
    Smoke-test the PyramidNet variants: instantiate, check the
    trainable-parameter count, and run one forward/backward pass.
    """
    import torch
    pretrained = False
    models = [
        pyramidnet101_a360,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != pyramidnet101_a360 or weight_count == 42455070)
        # One forward/backward on an ImageNet-sized input to verify the output
        # shape and gradient flow.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 11,038
| 28.126649
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/seresnet.py
|
"""
SE-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, SEBlock
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class SEResUnit(nn.Module):
    """
    SE-ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 conv1_stride):
        super(SEResUnit, self).__init__()
        # A projection shortcut is needed whenever the identity's shape differs
        # from the body's output (channel change or spatial downsampling).
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        # Squeeze-and-Excitation: channel-wise recalibration of the body output.
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            # 1x1 projection (no activation) to match the identity's shape.
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # SE is applied before the residual addition.
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEResNet(nn.Module):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SEResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample (stride 2) at the first unit of stages 2+.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), SEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 7x7 average pool assumes a 7x7 final feature map (224x224 input).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for conv weights; zero for conv biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled features before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_seresnet(blocks,
                 bottleneck=None,
                 conv1_stride=True,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. If None, it is
        inferred from the depth: bottleneck for 50 blocks and deeper.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    SEResNet
        The constructed network.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Per-stage unit counts for each supported depth; for 14 and 26 blocks the
    # layout depends on whether bottleneck units are used.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))
    # Sanity check: the chosen layout must reproduce the requested depth
    # (3 convs per bottleneck unit / 2 per simple unit, plus stem and head).
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units output 4x wider feature maps.
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def seresnet10(**kwargs):
    """
    SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=10, model_name="seresnet10")
    return get_seresnet(**config, **kwargs)
def seresnet12(**kwargs):
    """
    SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=12, model_name="seresnet12")
    return get_seresnet(**config, **kwargs)
def seresnet14(**kwargs):
    """
    SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=14, model_name="seresnet14")
    return get_seresnet(**config, **kwargs)
def seresnet16(**kwargs):
    """
    SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=16, model_name="seresnet16")
    return get_seresnet(**config, **kwargs)
def seresnet18(**kwargs):
    """
    SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=18, model_name="seresnet18")
    return get_seresnet(**config, **kwargs)
def seresnet26(**kwargs):
    """
    SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=26, bottleneck=False, model_name="seresnet26")
    return get_seresnet(**config, **kwargs)
def seresnetbc26b(**kwargs):
    """
    SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model
    (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b")
    return get_seresnet(**config, **kwargs)
def seresnet34(**kwargs):
    """
    SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=34, model_name="seresnet34")
    return get_seresnet(**config, **kwargs)
def seresnetbc38b(**kwargs):
    """
    SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model
    (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b")
    return get_seresnet(**config, **kwargs)
def seresnet50(**kwargs):
    """
    SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=50, model_name="seresnet50")
    return get_seresnet(**config, **kwargs)
def seresnet50b(**kwargs):
    """
    SE-ResNet-50 model with stride at the second convolution in the bottleneck
    block, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=50, conv1_stride=False, model_name="seresnet50b")
    return get_seresnet(**config, **kwargs)
def seresnet101(**kwargs):
    """
    SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=101, model_name="seresnet101")
    return get_seresnet(**config, **kwargs)
def seresnet101b(**kwargs):
    """
    SE-ResNet-101 model with stride at the second convolution in the bottleneck
    block ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnet`.
    """
    config = dict(blocks=101, conv1_stride=False, model_name="seresnet101b")
    return get_seresnet(**config, **kwargs)
def seresnet152(**kwargs):
    """
    SE-ResNet-152 model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnet`.
    """
    config = dict(blocks=152, model_name="seresnet152")
    return get_seresnet(**config, **kwargs)
def seresnet152b(**kwargs):
    """
    SE-ResNet-152 model with stride at the second convolution in the bottleneck
    block ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnet`.
    """
    config = dict(blocks=152, conv1_stride=False, model_name="seresnet152b")
    return get_seresnet(**config, **kwargs)
def seresnet200(**kwargs):
    """
    SE-ResNet-200 model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnet`.
    """
    config = dict(blocks=200, model_name="seresnet200")
    return get_seresnet(**config, **kwargs)
def seresnet200b(**kwargs):
    """
    SE-ResNet-200 model with stride at the second convolution in the bottleneck
    block ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Experimental model.

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnet`.
    """
    config = dict(blocks=200, conv1_stride=False, model_name="seresnet200b")
    return get_seresnet(**config, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SE-ResNet variant: build it, check its parameter
    count against the known value, and run one forward/backward pass on an
    ImageNet-sized input."""
    import torch
    pretrained = False
    models = [
        seresnet10,
        seresnet12,
        seresnet14,
        seresnet16,
        seresnet18,
        seresnet26,
        seresnetbc26b,
        seresnet34,
        seresnetbc38b,
        seresnet50,
        seresnet50b,
        seresnet101,
        seresnet101b,
        seresnet152,
        seresnet152b,
        seresnet200,
        seresnet200b,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != seresnet10 or weight_count == 5463332)
        assert (model != seresnet12 or weight_count == 5537896)
        assert (model != seresnet14 or weight_count == 5835504)
        assert (model != seresnet16 or weight_count == 7024640)
        assert (model != seresnet18 or weight_count == 11778592)
        assert (model != seresnet26 or weight_count == 18093852)
        assert (model != seresnetbc26b or weight_count == 17395976)
        assert (model != seresnet34 or weight_count == 21958868)
        assert (model != seresnetbc38b or weight_count == 24026616)
        assert (model != seresnet50 or weight_count == 28088024)
        assert (model != seresnet50b or weight_count == 28088024)
        assert (model != seresnet101 or weight_count == 49326872)
        assert (model != seresnet101b or weight_count == 49326872)
        assert (model != seresnet152 or weight_count == 66821848)
        assert (model != seresnet152b or weight_count == 66821848)
        assert (model != seresnet200 or weight_count == 71835864)
        assert (model != seresnet200b or weight_count == 71835864)
        # One forward/backward pass on a single ImageNet-sized image.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 18,211
| 31.579606
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/seresnet_cub.py
|
"""
SE-ResNet for CUB-200-2011, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub',
'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub',
'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub',
'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub']
from .seresnet import get_seresnet
def seresnet10_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-10 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=10, model_name="seresnet10_cub")
    return get_seresnet(**config, **kwargs)
def seresnet12_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-12 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=12, model_name="seresnet12_cub")
    return get_seresnet(**config, **kwargs)
def seresnet14_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-14 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=14, model_name="seresnet14_cub")
    return get_seresnet(**config, **kwargs)
def seresnetbc14b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-BC-14b model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental bottleneck-compressed variant.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=14, bottleneck=True, conv1_stride=False,
                  model_name="seresnetbc14b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet16_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-16 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=16, model_name="seresnet16_cub")
    return get_seresnet(**config, **kwargs)
def seresnet18_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-18 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=18, model_name="seresnet18_cub")
    return get_seresnet(**config, **kwargs)
def seresnet26_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-26 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=26, bottleneck=False, model_name="seresnet26_cub")
    return get_seresnet(**config, **kwargs)
def seresnetbc26b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-BC-26b model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental bottleneck-compressed variant.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=26, bottleneck=True, conv1_stride=False,
                  model_name="seresnetbc26b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet34_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-34 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=34, model_name="seresnet34_cub")
    return get_seresnet(**config, **kwargs)
def seresnetbc38b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-BC-38b model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental bottleneck-compressed variant.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=38, bottleneck=True, conv1_stride=False,
                  model_name="seresnetbc38b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet50_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-50 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=50, model_name="seresnet50_cub")
    return get_seresnet(**config, **kwargs)
def seresnet50b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-50 model for CUB-200-2011 with stride at the second convolution
    in the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=50, conv1_stride=False,
                  model_name="seresnet50b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet101_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-101 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=101, model_name="seresnet101_cub")
    return get_seresnet(**config, **kwargs)
def seresnet101b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-101 model for CUB-200-2011 with stride at the second convolution
    in the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=101, conv1_stride=False,
                  model_name="seresnet101b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet152_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-152 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=152, model_name="seresnet152_cub")
    return get_seresnet(**config, **kwargs)
def seresnet152b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-152 model for CUB-200-2011 with stride at the second convolution
    in the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=152, conv1_stride=False,
                  model_name="seresnet152b_cub")
    return get_seresnet(**config, **kwargs)
def seresnet200_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-200 model for CUB-200-2011 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=200, model_name="seresnet200_cub")
    return get_seresnet(**config, **kwargs)
def seresnet200b_cub(num_classes=200, **kwargs):
    """
    SE-ResNet-200 model for CUB-200-2011 with stride at the second convolution
    in the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Experimental model.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    **kwargs
        Forwarded to `get_seresnet` (e.g. `pretrained`, `root`).
    """
    config = dict(num_classes=num_classes, blocks=200, conv1_stride=False,
                  model_name="seresnet200b_cub")
    return get_seresnet(**config, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SE-ResNet CUB-200-2011 variant: build it, check its
    parameter count against the known value, and run one forward/backward
    pass on a 224x224 input."""
    import torch
    pretrained = False
    models = [
        seresnet10_cub,
        seresnet12_cub,
        seresnet14_cub,
        seresnetbc14b_cub,
        seresnet16_cub,
        seresnet18_cub,
        seresnet26_cub,
        seresnetbc26b_cub,
        seresnet34_cub,
        seresnetbc38b_cub,
        seresnet50_cub,
        seresnet50b_cub,
        seresnet101_cub,
        seresnet101b_cub,
        seresnet152_cub,
        seresnet152b_cub,
        seresnet200_cub,
        seresnet200b_cub,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != seresnet10_cub or weight_count == 5052932)
        assert (model != seresnet12_cub or weight_count == 5127496)
        assert (model != seresnet14_cub or weight_count == 5425104)
        assert (model != seresnetbc14b_cub or weight_count == 9126136)
        assert (model != seresnet16_cub or weight_count == 6614240)
        assert (model != seresnet18_cub or weight_count == 11368192)
        assert (model != seresnet26_cub or weight_count == 17683452)
        assert (model != seresnetbc26b_cub or weight_count == 15756776)
        assert (model != seresnet34_cub or weight_count == 21548468)
        assert (model != seresnetbc38b_cub or weight_count == 22387416)
        assert (model != seresnet50_cub or weight_count == 26448824)
        assert (model != seresnet50b_cub or weight_count == 26448824)
        assert (model != seresnet101_cub or weight_count == 47687672)
        assert (model != seresnet101b_cub or weight_count == 47687672)
        assert (model != seresnet152_cub or weight_count == 65182648)
        assert (model != seresnet152b_cub or weight_count == 65182648)
        assert (model != seresnet200_cub or weight_count == 70196664)
        assert (model != seresnet200b_cub or weight_count == 70196664)
        # One forward/backward pass; 200 output classes for CUB-200-2011.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 200))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,391
| 35.808184
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/densenet.py
|
"""
DenseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
class DenseUnit(nn.Module):
    """
    DenseNet unit: a pre-activated 1x1/3x3 bottleneck whose output is
    concatenated with the unit input along the channel axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (input channels plus growth).
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate):
        super(DenseUnit, self).__init__()
        self.use_dropout = dropout_rate != 0.0
        expansion = 4
        # New channels produced by this unit (the "growth").
        growth = out_channels - in_channels
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=growth * expansion)
        self.conv2 = pre_conv3x3_block(
            in_channels=growth * expansion,
            out_channels=growth)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        if self.use_dropout:
            out = self.dropout(out)
        # Dense connectivity: stack the input in front of the new features.
        return torch.cat((x, out), dim=1)
class TransitionBlock(nn.Module):
    """
    DenseNet transition: a pre-activated 1x1 convolution followed by 2x2
    average pooling. Used as the first module of every stage except the first
    to compress channels and halve the spatial resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """

    def __init__(self,
                 in_channels,
                 out_channels):
        super(TransitionBlock, self).__init__()
        self.conv = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels)
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        return self.pool(self.conv(x))
class DenseNet(nn.Module):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """

    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(DenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        current = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            if stage_idx != 0:
                # Every stage after the first opens with a transition block
                # that halves the channel count and the spatial size.
                stage.add_module("trans{}".format(stage_idx + 1), TransitionBlock(
                    in_channels=current,
                    out_channels=current // 2))
                current //= 2
            for unit_idx, unit_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(unit_idx + 1), DenseUnit(
                    in_channels=current,
                    out_channels=unit_channels,
                    dropout_rate=dropout_rate))
                current = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=current))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=current,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_densenet(blocks,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 121, 161, 169, 201).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DenseNet
        The constructed network.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is requested without a
        valid `model_name`.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))

    # Per-unit output channel counts. Each stage starts from half the channel
    # count the previous stage ended with (the transition block compresses by
    # a factor of 2), then every dense unit adds `growth_rate` channels.
    # (This replaces an opaque nested functools.reduce with the same result.)
    channels = []
    prev_channels = init_block_channels * 2
    for num_units in layers:
        stage_channels = []
        current = prev_channels // 2
        for _ in range(num_units):
            current += growth_rate
            stage_channels.append(current)
        channels.append(stage_channels)
        prev_channels = stage_channels[-1]

    net = DenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def densenet121(**kwargs):
    """
    DenseNet-121 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_densenet`.
    """
    config = dict(blocks=121, model_name="densenet121")
    return get_densenet(**config, **kwargs)
def densenet161(**kwargs):
    """
    DenseNet-161 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_densenet`.
    """
    config = dict(blocks=161, model_name="densenet161")
    return get_densenet(**config, **kwargs)
def densenet169(**kwargs):
    """
    DenseNet-169 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_densenet`.
    """
    config = dict(blocks=169, model_name="densenet169")
    return get_densenet(**config, **kwargs)
def densenet201(**kwargs):
    """
    DenseNet-201 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_densenet`.
    """
    config = dict(blocks=201, model_name="densenet201")
    return get_densenet(**config, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every DenseNet variant: build it, check its parameter
    count against the known value, and run one forward/backward pass on an
    ImageNet-sized input."""
    import torch
    pretrained = False
    models = [
        densenet121,
        densenet161,
        densenet169,
        densenet201,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != densenet121 or weight_count == 7978856)
        assert (model != densenet161 or weight_count == 28681000)
        assert (model != densenet169 or weight_count == 14149480)
        assert (model != densenet201 or weight_count == 20013928)
        # One forward/backward pass on a single ImageNet-sized image.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 9,930
| 29.556923
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/seresnext.py
|
"""
SE-ResNeXt for ImageNet-1K, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, SEBlock
from .resnet import ResInitBlock
from .resnext import ResNeXtBottleneck
class SEResNeXtUnit(nn.Module):
    """
    SE-ResNeXt unit: a grouped ResNeXt bottleneck followed by channel-wise
    Squeeze-and-Excitation recalibration, plus an identity (or 1x1-projected)
    residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width):
        super(SEResNeXtUnit, self).__init__()
        # A projection is needed whenever the body changes channels or size.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.se(self.body(x))
        return self.activ(out + identity)
class SEResNeXt(nn.Module):
    """
    SE-ResNeXt model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """

    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SEResNeXt, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        current = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                unit_stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), SEResNeXtUnit(
                    in_channels=current,
                    out_channels=unit_channels,
                    stride=unit_stride,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width))
                current = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=current,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all convolution weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_seresnext(blocks,
                  cardinality,
                  bottleneck_width,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create SE-ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    depth_table = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_table:
        raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))
    layers = depth_table[blocks]

    init_block_channels = 64
    stage_widths = [256, 512, 1024, 2048]
    channels = [[width] * depth for width, depth in zip(stage_widths, layers)]

    net = SEResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def seresnext50_32x4d(**kwargs):
    """
    SE-ResNeXt-50 (32x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnext`.
    """
    config = dict(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d")
    return get_seresnext(**config, **kwargs)
def seresnext101_32x4d(**kwargs):
    """
    SE-ResNeXt-101 (32x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnext`.
    """
    config = dict(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d")
    return get_seresnext(**config, **kwargs)
def seresnext101_64x4d(**kwargs):
    """
    SE-ResNeXt-101 (64x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Keyword arguments such as `pretrained` and `root` are forwarded to
    `get_seresnext`.
    """
    config = dict(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d")
    return get_seresnext(**config, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SE-ResNeXt variant: build it, check its parameter
    count against the known value, and run one forward/backward pass on an
    ImageNet-sized input."""
    import torch
    pretrained = False
    models = [
        seresnext50_32x4d,
        seresnext101_32x4d,
        seresnext101_64x4d,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts for each variant.
        assert (model != seresnext50_32x4d or weight_count == 27559896)
        assert (model != seresnext101_32x4d or weight_count == 48955416)
        assert (model != seresnext101_64x4d or weight_count == 88232984)
        # One forward/backward pass on a single ImageNet-sized image.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 8,721
| 29.929078
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/darts.py
|
"""
DARTS for ImageNet-1K, implemented in PyTorch.
Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
"""
__all__ = ['DARTS', 'darts']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, Identity
from .nasnet import nasnet_dual_path_sequential
class DwsConv(nn.Module):
    """
    Standard dilated depthwise separable convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layers use a bias vector.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False):
        super(DwsConv, self).__init__()
        # Depthwise stage: one filter per input channel (groups == in_channels).
        self.dw_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias)
        # Pointwise stage: 1x1 convolution mixes information across channels.
        self.pw_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, bias=bias)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
class DartsConv(nn.Module):
    """
    DARTS specific convolution block: optional ReLU -> conv -> batch norm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activate=True):
        super(DartsConv, self).__init__()
        self.activate = activate
        if activate:
            # Non-inplace ReLU: the input is reused by other cells.
            self.activ = nn.ReLU(inplace=False)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        out = self.activ(x) if self.activate else x
        out = self.conv(out)
        return self.bn(out)
def darts_conv1x1(in_channels, out_channels, activate=True):
    """
    1x1 version of the DARTS specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return DartsConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=1,
        padding=0,
        activate=activate)
def darts_conv3x3_s2(in_channels, out_channels, activate=True):
    """
    3x3 version of the DARTS specific convolution block with stride 2.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return DartsConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=2,
        padding=1,
        activate=activate)
class DartsDwsConv(nn.Module):
    """
    DARTS specific dilated convolution block: ReLU -> depthwise-separable conv -> batch norm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation):
        super(DartsDwsConv, self).__init__()
        self.activ = nn.ReLU(inplace=False)
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        return self.bn(self.conv(self.activ(x)))
class DartsDwsBranch(nn.Module):
    """
    DARTS specific branch of two stacked depthwise separable convolution blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DartsDwsBranch, self).__init__()
        mid_channels = in_channels
        # Only the first conv may stride; the second keeps resolution.
        self.conv1 = DartsDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=1)
        self.conv2 = DartsDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding,
            dilation=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class DartsReduceBranch(nn.Module):
    """
    DARTS specific factorized reduce block: two strided 1x1 convolutions whose
    outputs are concatenated channel-wise.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    """
    def __init__(self, in_channels, out_channels, stride=2):
        super(DartsReduceBranch, self).__init__()
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2
        self.activ = nn.ReLU(inplace=False)
        self.conv1 = conv1x1(in_channels=in_channels, out_channels=mid_channels, stride=stride)
        self.conv2 = conv1x1(in_channels=in_channels, out_channels=mid_channels, stride=stride)
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        x = self.activ(x)
        branch1 = self.conv1(x)
        # Offset the second branch by one pixel so the strided convolutions
        # together sample complementary spatial positions.
        branch2 = self.conv2(x[:, :, 1:, 1:].contiguous())
        return self.bn(torch.cat((branch1, branch2), dim=1))
class Stem1Unit(nn.Module):
    """
    DARTS Stem1 unit: two stride-2 3x3 convolution blocks (overall stride 4).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self, in_channels, out_channels):
        super(Stem1Unit, self).__init__()
        mid_channels = out_channels // 2
        self.conv1 = darts_conv3x3_s2(
            in_channels=in_channels,
            out_channels=mid_channels,
            activate=False)
        self.conv2 = darts_conv3x3_s2(
            in_channels=mid_channels,
            out_channels=out_channels,
            activate=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
def stem2_unit(in_channels, out_channels):
    """
    DARTS Stem2 unit: a single stride-2 3x3 convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return darts_conv3x3_s2(
        in_channels=in_channels,
        out_channels=out_channels,
        activate=True)
def darts_maxpool3x3(channels, stride):
    """
    DARTS specific 3x3 Max pooling layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels. Unused parameter.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    # `channels` only sanity-checked; pooling is channel-agnostic.
    assert (channels > 0)
    return nn.MaxPool2d(kernel_size=3, stride=stride, padding=1)
def darts_skip_connection(channels, stride):
    """
    DARTS specific skip connection layer: identity at stride 1, factorized
    reduce at stride 2.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    assert (channels > 0)
    if stride != 1:
        assert (stride == 2)
        return DartsReduceBranch(
            in_channels=channels,
            out_channels=channels,
            stride=stride)
    return Identity()
def darts_dws_conv3x3(channels, stride):
    """
    3x3 version of DARTS specific dilated convolution block (dilation 2).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return DartsDwsConv(
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        stride=stride,
        padding=2,
        dilation=2)
def darts_dws_branch3x3(channels, stride):
    """
    3x3 version of DARTS specific depthwise separable convolution branch.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return DartsDwsBranch(
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        stride=stride,
        padding=1)
# Set of operations in genotype.
# Maps a genotype operation name to a factory that takes (channels, stride)
# and returns the corresponding nn.Module (consumed by DartsMainBlock).
GENOTYPE_OPS = {
    'max_pool_3x3': darts_maxpool3x3,
    'skip_connect': darts_skip_connection,
    'dil_conv_3x3': darts_dws_conv3x3,
    'sep_conv_3x3': darts_dws_branch3x3,
}
class DartsMainBlock(nn.Module):
    """
    DARTS main block, described by genotype. Each of the 4 steps combines two
    earlier states through genotype-selected operations; the last 4 states are
    concatenated channel-wise.

    Parameters:
    ----------
    genotype : list of tuples (str, int)
        List of genotype elements (operations and linked indices).
    channels : int
        Number of input/output channels.
    reduction : bool
        Whether use reduction.
    """
    def __init__(self, genotype, channels, reduction):
        super(DartsMainBlock, self).__init__()
        # Indices of the intermediate states that form the output.
        self.concat = [2, 3, 4, 5]
        op_names, indices = zip(*genotype)
        self.indices = indices
        self.steps = len(op_names) // 2
        self.ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # Only ops fed by the two cell inputs (index < 2) stride in a
            # reduction cell.
            stride = 2 if reduction and index < 2 else 1
            self.ops.append(GENOTYPE_OPS[name](channels, stride))

    def forward(self, x, x_prev):
        states = [x_prev, x]
        for step in range(self.steps):
            first = 2 * step
            second = first + 1
            y1 = self.ops[first](states[self.indices[first]])
            y2 = self.ops[second](states[self.indices[second]])
            states.append(y1 + y2)
        return torch.cat([states[i] for i in self.concat], dim=1)
class DartsUnit(nn.Module):
    """
    DARTS unit: brings both inputs to a common width, then runs the
    genotype-defined main block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    genotype : list of tuples (str, int)
        List of genotype elements (operations and linked indices).
    reduction : bool
        Whether use reduction.
    prev_reduction : bool
        Whether use previous reduction.
    """
    def __init__(self, in_channels, prev_in_channels, out_channels, genotype, reduction, prev_reduction):
        super(DartsUnit, self).__init__()
        # Main block concatenates 4 states, so each works at out/4 channels.
        mid_channels = out_channels // 4
        if prev_reduction:
            # Previous input has a larger spatial size -> factorized reduce.
            self.preprocess_prev = DartsReduceBranch(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
        else:
            self.preprocess_prev = darts_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
        self.preprocess = darts_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.body = DartsMainBlock(
            genotype=genotype,
            channels=mid_channels,
            reduction=reduction)

    def forward(self, x, x_prev):
        return self.body(self.preprocess(x), self.preprocess_prev(x_prev))
class DARTS(nn.Module):
    """
    DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    stem_blocks_channels : int
        Number of output channels for the Stem units.
    normal_genotype : list of tuples (str, int)
        Genotype for the normal (stride-1) cells.
    reduce_genotype : list of tuples (str, int)
        Genotype for the reduction (stride-2) cells.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 stem_blocks_channels,
                 normal_genotype,
                 reduce_genotype,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(DARTS, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Dual-path container: units receive (x, x_prev) pairs, except the
        # first two stem modules and the last (pooling) module, which are
        # ordinary single-input modules.
        self.features = nasnet_dual_path_sequential(
            return_two=False,
            first_ordinals=2,
            last_ordinals=1)
        self.features.add_module("stem1_unit", Stem1Unit(
            in_channels=in_channels,
            out_channels=stem_blocks_channels))
        in_channels = stem_blocks_channels
        self.features.add_module("stem2_unit", stem2_unit(
            in_channels=in_channels,
            out_channels=stem_blocks_channels))
        # Track both current and previous unit widths for the dual-path cells.
        prev_in_channels = in_channels
        in_channels = stem_blocks_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nasnet_dual_path_sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of every non-first stage halves resolution.
                reduction = (i != 0) and (j == 0)
                # The previous input needs factorized reduction when it comes
                # from before a stride-2 step (stem1 at the very start, or the
                # unit preceding a stage's reduction unit).
                prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1))
                genotype = reduce_genotype if reduction else normal_genotype
                stage.add_module("unit{}".format(j + 1), DartsUnit(
                    in_channels=in_channels,
                    prev_in_channels=prev_in_channels,
                    out_channels=out_channels,
                    genotype=genotype,
                    reduction=reduction,
                    prev_reduction=prev_reduction))
                prev_in_channels = in_channels
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # NOTE(review): kernel 7 assumes a 7x7 final feature map for 224x224
        # input -- confirm if in_size changes.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten (batch, C, 1, 1) features before the linear classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_darts(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create DARTS model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    stem_blocks_channels = 48
    layers = [4, 5, 5]
    channels_per_layers = [192, 384, 768]
    channels = [[c] * n for c, n in zip(channels_per_layers, layers)]
    # Fixed genotypes found by the DARTS search: (operation, input-state index).
    normal_genotype = [
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 1), ('skip_connect', 0),
        ('skip_connect', 0), ('dil_conv_3x3', 2)]
    reduce_genotype = [
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('skip_connect', 2), ('max_pool_3x3', 1),
        ('max_pool_3x3', 0), ('skip_connect', 2),
        ('skip_connect', 2), ('max_pool_3x3', 1)]
    net = DARTS(
        channels=channels,
        stem_blocks_channels=stem_blocks_channels,
        normal_genotype=normal_genotype,
        reduce_genotype=reduce_genotype,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def darts(**kwargs):
    """
    DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_darts(model_name="darts", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the DARTS factory: parameter count and output shape."""
    import torch
    pretrained = False
    for model in [darts]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != darts or weight_count == 4718752)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 20,291
| 26.683492
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/drn.py
|
"""
DRN for ImageNet-1K, implemented in PyTorch.
Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
"""
__all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105']
import os
import torch.nn as nn
import torch.nn.init as init
class DRNConv(nn.Module):
    """
    DRN specific convolution block: conv -> batch norm -> optional ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, activate):
        super(DRNConv, self).__init__()
        self.activate = activate
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.bn(self.conv(x))
        return self.activ(out) if self.activate else out
def drn_conv1x1(in_channels, out_channels, stride, activate):
    """
    1x1 version of the DRN specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether activate the convolution block.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        dilation=1,
        activate=activate)
def drn_conv3x3(in_channels, out_channels, stride, dilation, activate):
    """
    3x3 version of the DRN specific convolution block. The padding equals the
    dilation so the spatial size is preserved at stride 1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        activate=activate)
class DRNBlock(nn.Module):
    """
    Simple DRN block for residual path in DRN unit: two 3x3 convolutions, only
    the first activated (the unit applies ReLU after the residual add).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layers.
    """
    def __init__(self, in_channels, out_channels, stride, dilation):
        super(DRNBlock, self).__init__()
        self.conv1 = drn_conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            dilation=dilation,
            activate=True)
        self.conv2 = drn_conv3x3(
            in_channels=out_channels,
            out_channels=out_channels,
            stride=1,
            dilation=dilation,
            activate=False)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class DRNBottleneck(nn.Module):
    """
    DRN bottleneck block for residual path in DRN unit: 1x1 reduce -> 3x3 ->
    1x1 expand, with mid width out_channels // 4.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layer.
    """
    def __init__(self, in_channels, out_channels, stride, dilation):
        super(DRNBottleneck, self).__init__()
        mid_channels = out_channels // 4
        self.conv1 = drn_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=1,
            activate=True)
        self.conv2 = drn_conv3x3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            dilation=dilation,
            activate=True)
        self.conv3 = drn_conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            stride=1,
            activate=False)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class DRNUnit(nn.Module):
    """
    DRN unit with optional residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    simplified : bool
        Whether to use a simple or simplified block in units.
    residual : bool
        Whether do residual calculations.
    """
    def __init__(self, in_channels, out_channels, stride, dilation, bottleneck, simplified, residual):
        super(DRNUnit, self).__init__()
        # Mutually exclusive modes: bottleneck implies residual, and a
        # simplified block is neither bottleneck nor residual.
        assert residual or (not bottleneck)
        assert (not (bottleneck and simplified))
        assert (not (residual and simplified))
        self.residual = residual
        # A projection shortcut is needed only when shape changes on a
        # residual, non-simplified unit.
        self.resize_identity = ((in_channels != out_channels) or (stride != 1)) and self.residual and (not simplified)
        if bottleneck:
            self.body = DRNBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                dilation=dilation)
        elif simplified:
            self.body = drn_conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                dilation=dilation,
                activate=False)
        else:
            self.body = DRNBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                dilation=dilation)
        if self.resize_identity:
            self.identity_conv = drn_conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activate=False)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        if self.residual:
            out = out + identity
        return self.activ(out)
def drn_init_block(in_channels, out_channels):
    """
    DRN specific initial block: a stride-1 7x7 convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=1,
        padding=3,
        dilation=1,
        activate=True)
class DRN(nn.Module):
    """
    DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilation values for 3x3 convolution layers for each unit.
    bottlenecks : list of list of int
        Whether to use a bottleneck or simple block in each unit.
    simplifieds : list of list of int
        Whether to use a simple or simplified block in each unit.
    residuals : list of list of int
        Whether to use residual block in each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bottlenecks,
                 simplifieds,
                 residuals,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(DRN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", drn_init_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), DRNUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    dilation=dilations[i][j],
                    bottleneck=(bottlenecks[i][j] == 1),
                    simplified=(simplifieds[i][j] == 1),
                    residual=(residuals[i][j] == 1)))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # NOTE(review): kernel 28 assumes a 28x28 final feature map (224 input
        # through three stride-2 stage transitions, cf. get_drn's `downsample`
        # list) -- confirm if in_size changes.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=28,
            stride=1))
        # Classification head is a 1x1 convolution; the result is flattened
        # in forward() afterwards.
        self.output = nn.Conv2d(
            in_channels=in_channels,
            out_channels=num_classes,
            kernel_size=1)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        x = x.view(x.size(0), -1)
        return x
def get_drn(blocks,
            simplified=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".torch", "models"),
            **kwargs):
    """
    Create DRN-C or DRN-D model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    simplified : bool, default False
        Whether to use simplified scheme (D architecture).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Units per level; the D variants (`simplified`) and C variants share
    # layer layouts but differ in block type (see *_per_layers below).
    if blocks == 22:
        assert simplified
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 26:
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 38:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 42:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 54:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 58:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 105:
        assert simplified
        layers = [1, 1, 3, 4, 23, 3, 1, 1]
    else:
        raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
    # Deeper nets (>= 50 blocks) switch the middle levels to bottlenecks.
    if blocks < 50:
        channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
        bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
    else:
        channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
        bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    if simplified:
        simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
        residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    else:
        simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
        residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
    dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
    # A level with downsample == 1 opens a new stage (stride-2 first unit);
    # levels with 0 are merged into the preceding stage.
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]
    def expand(property_per_layers):
        # Expand a per-level value into per-unit values grouped into stages:
        # repeat the value `layers[k]` times, appending a new stage when
        # downsample[k] != 0, otherwise extending the last stage.
        from functools import reduce
        return reduce(
            lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
            zip(property_per_layers, layers, downsample),
            [[]])
    channels = expand(channels_per_layers)
    dilations = expand(dilations_per_layers)
    bottlenecks = expand(bottlenecks_per_layers)
    residuals = expand(residuals_per_layers)
    simplifieds = expand(simplifieds_per_layers)
    init_block_channels = channels_per_layers[0]
    net = DRN(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bottlenecks=bottlenecks,
        simplifieds=simplifieds,
        residuals=residuals,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def drnc26(**kwargs):
    """
    DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=26, model_name="drnc26", **kwargs)
    return net
def drnc42(**kwargs):
    """
    DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=42, model_name="drnc42", **kwargs)
    return net
def drnc58(**kwargs):
    """
    DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=58, model_name="drnc58", **kwargs)
    return net
def drnd22(**kwargs):
    """
    DRN-D-22 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed copy-paste error: docstring previously said "DRN-D-58" although
    # this factory builds the 22-block simplified (D) variant.
    return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs)
def drnd38(**kwargs):
    """
    DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=38, simplified=True, model_name="drnd38", **kwargs)
    return net
def drnd54(**kwargs):
    """
    DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=54, simplified=True, model_name="drnd54", **kwargs)
    return net
def drnd105(**kwargs):
    """
    DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=105, simplified=True, model_name="drnd105", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the DRN factories: parameter counts and output shape."""
    import torch
    pretrained = False
    expected = {
        drnc26: 21126584,
        drnc42: 31234744,
        drnc58: 40542008,  # 41591608
        drnd22: 16393752,
        drnd38: 26501912,
        drnd54: 35809176,
        drnd105: 54801304,
    }
    for model in (drnc26, drnc42, drnc58, drnd22, drnd38, drnd54, drnd105):
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected[model])
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 18,826
| 28.695584
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/mixnet.py
|
"""
MixNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
"""
__all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import round_channels, get_activation_layer, conv1x1_block, conv3x3_block, dwconv3x3_block, SEBlock
class MixConv(nn.Module):
    """
    Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,'
    https://arxiv.org/abs/1907.09595. The input is split along `axis` and each
    chunk is convolved with its own kernel size, then the results are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding,
                 dilation=1, groups=1, bias=False, axis=1):
        super(MixConv, self).__init__()
        # Normalize scalar kernel/padding specs to single-element lists.
        kernel_sizes = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        paddings = padding if isinstance(padding, list) else [padding]
        kernel_count = len(kernel_sizes)
        self.splitted_in_channels = self.split_channels(in_channels, kernel_count)
        out_splits = self.split_channels(out_channels, kernel_count)
        for i, (ks_i, pad_i, in_i, out_i) in enumerate(
                zip(kernel_sizes, paddings, self.splitted_in_channels, out_splits)):
            # Depthwise case (groups == out_channels) must be re-split per branch.
            self.add_module(
                name=str(i),
                module=nn.Conv2d(
                    in_channels=in_i,
                    out_channels=out_i,
                    kernel_size=ks_i,
                    stride=stride,
                    padding=pad_i,
                    dilation=dilation,
                    groups=(out_i if out_channels == groups else groups),
                    bias=bias))
        self.axis = axis

    def forward(self, x):
        chunks = torch.split(x, self.splitted_in_channels, dim=self.axis)
        outs = [conv(chunk) for chunk, conv in zip(chunks, self._modules.values())]
        return torch.cat(tuple(outs), dim=self.axis)

    @staticmethod
    def split_channels(channels, kernel_count):
        # Nearly even split; the first chunk absorbs the remainder.
        split = [channels // kernel_count] * kernel_count
        split[0] += channels - sum(split)
        return split
class MixConvBlock(nn.Module):
    """
    Mixed convolution block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function. None disables
        the activation entirely.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MixConvBlock, self).__init__()
        # Both the activation and BatchNorm are optional; record which are used.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = MixConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            # get_activation_layer resolves callables/names to an nn.Module.
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        # conv -> (optional) batch norm -> (optional) activation
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def mixconv1x1_block(in_channels,
                     out_channels,
                     kernel_count,
                     stride=1,
                     groups=1,
                     bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation=(lambda: nn.ReLU(inplace=True))):
    """
    1x1 version of the mixed convolution block.

    Every branch uses a 1x1 kernel with zero padding; only the channel split
    across `kernel_count` branches differs between branches.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_count : int
        Kernel count.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str, or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    ones = [1] * kernel_count
    zeros = [0] * kernel_count
    return MixConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=ones,
        stride=stride,
        padding=zeros,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
class MixUnit(nn.Module):
    """
    MixNet unit: optional expansion conv -> depthwise (possibly mixed) conv ->
    optional SE block -> projection conv, with an identity residual when the
    shape is preserved.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_kernel_count : int
        Expansion convolution kernel count for each unit.
    conv1_kernel_count : int
        Conv1 kernel count for each unit.
    conv2_kernel_count : int
        Conv2 kernel count for each unit.
    exp_factor : int
        Expansion factor for each unit.
    se_factor : int
        SE reduction factor for each unit (0 disables SE).
    activation : str
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 exp_kernel_count,
                 conv1_kernel_count,
                 conv2_kernel_count,
                 exp_factor,
                 se_factor,
                 activation):
        super(MixUnit, self).__init__()
        assert (exp_factor >= 1)
        assert (se_factor >= 0)
        # Residual connection only when the unit keeps both channels and resolution.
        self.residual = (in_channels == out_channels) and (stride == 1)
        self.use_se = se_factor > 0
        mid_channels = exp_factor * in_channels
        # Expansion conv is skipped when exp_factor == 1 (mid == in channels).
        self.use_exp_conv = exp_factor > 1
        if self.use_exp_conv:
            # A kernel count of 1 degenerates to an ordinary 1x1 conv block.
            if exp_kernel_count == 1:
                self.exp_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    activation=activation)
            else:
                self.exp_conv = mixconv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    kernel_count=exp_kernel_count,
                    activation=activation)
        if conv1_kernel_count == 1:
            self.conv1 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                activation=activation)
        else:
            # Mixed depthwise conv: branch i uses a (3 + 2i) kernel with
            # padding (1 + i), so all branches keep the same spatial size.
            self.conv1 = MixConvBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)],
                stride=stride,
                padding=[1 + i for i in range(conv1_kernel_count)],
                groups=mid_channels,
                activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                round_mid=False,
                mid_activation=activation)
        # Projection conv has no activation (linear bottleneck).
        if conv2_kernel_count == 1:
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                activation=None)
        else:
            self.conv2 = mixconv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_count=conv2_kernel_count,
                activation=None)

    def forward(self, x):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x)
        x = self.conv1(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class MixInitBlock(nn.Module):
    """
    MixNet specific initial block: a stride-2 3x3 conv followed by one plain
    (non-mixed, non-SE, non-expanded) MixUnit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(MixInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        # All kernel counts are 1 and exp_factor is 1, so this unit reduces to
        # a depthwise 3x3 + pointwise 1x1 with a residual connection.
        self.conv2 = MixUnit(
            in_channels=out_channels,
            out_channels=out_channels,
            stride=1,
            exp_kernel_count=1,
            conv1_kernel_count=1,
            conv2_kernel_count=1,
            exp_factor=1,
            se_factor=0,
            activation="relu")

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class MixNet(nn.Module):
    """
    MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    exp_kernel_counts : list of list of int
        Expansion convolution kernel count for each unit.
    conv1_kernel_counts : list of list of int
        Conv1 kernel count for each unit.
    conv2_kernel_counts : list of list of int
        Conv2 kernel count for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 exp_kernel_counts,
                 conv1_kernel_counts,
                 conv2_kernel_counts,
                 exp_factors,
                 se_factors,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MixNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", MixInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Stages 1-3 downsample at their first unit; the last stage
                # (i == 3) downsamples at its middle unit instead.
                stride = 2 if ((j == 0) and (i != 3)) or ((j == len(channels_per_stage) // 2) and (i == 3)) else 1
                exp_kernel_count = exp_kernel_counts[i][j]
                conv1_kernel_count = conv1_kernel_counts[i][j]
                conv2_kernel_count = conv2_kernel_counts[i][j]
                exp_factor = exp_factors[i][j]
                se_factor = se_factors[i][j]
                # First stage uses ReLU; deeper stages use Swish.
                activation = "relu" if i == 0 else "swish"
                stage.add_module("unit{}".format(j + 1), MixUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    exp_kernel_count=exp_kernel_count,
                    conv1_kernel_count=conv1_kernel_count,
                    conv2_kernel_count=conv2_kernel_count,
                    exp_factor=exp_factor,
                    se_factor=se_factor,
                    activation=activation))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels))
        in_channels = final_block_channels
        # 7x7 average pool assumes a 224x224 input (7 = 224 / 32).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases where present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_mixnet(version,
               width_scale,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create MixNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MixNet ('s' or 'm').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-unit hyperparameters for each architecture version; outer lists are
    # stages, inner lists are units within a stage.
    if version == "s":
        init_block_channels = 16
        channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]]
        conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]]
    elif version == "m":
        init_block_channels = 24
        channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]]
        conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]]
    else:
        raise ValueError("Unsupported MixNet version {}".format(version))

    final_block_channels = 1536

    # Width scaling is applied to the stage channels and the init block only;
    # the final block width stays fixed.
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = round_channels(init_block_channels * width_scale)

    net = MixNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        exp_kernel_counts=exp_kernel_counts,
        conv1_kernel_counts=conv1_kernel_counts,
        conv2_kernel_counts=conv2_kernel_counts,
        exp_factors=exp_factors,
        se_factors=se_factors,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def mixnet_s(**kwargs):
    """
    MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_mixnet(version="s", width_scale=1.0, model_name="mixnet_s", **kwargs)
    return net
def mixnet_m(**kwargs):
    """
    MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_mixnet(version="m", width_scale=1.0, model_name="mixnet_m", **kwargs)
    return net
def mixnet_l(**kwargs):
    """
    MixNet-L model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.

    MixNet-L is the 'm' architecture widened by a factor of 1.3.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_mixnet(version="m", width_scale=1.3, model_name="mixnet_l", **kwargs)
    return net
def _calc_width(net):
    """Return the total number of elements in all trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(param.size())
               for param in net.parameters()
               if param.requires_grad)
def _test():
    """Smoke test: build each MixNet variant, check parameter counts and output shape."""
    import torch

    pretrained = False

    models = [
        mixnet_s,
        mixnet_m,
        mixnet_l,
    ]

    for model in models:

        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter counts per variant.
        assert (model != mixnet_s or weight_count == 4134606)
        assert (model != mixnet_m or weight_count == 5014382)
        assert (model != mixnet_l or weight_count == 7329252)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        # Also exercise the backward pass.
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 20,528
| 33.386935
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/dabnet.py
|
"""
DABNet for image segmentation, implemented in PyTorch.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential
class DwaConvBlock(nn.Module):
    """
    Depthwise asymmetric separable convolution block: a (k x 1) depthwise conv
    followed by a (1 x k) depthwise conv, each with its own norm/activation.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(DwaConvBlock, self).__init__()
        # Vertical (k x 1) depthwise convolution.
        self.conv1 = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(kernel_size, 1),
            stride=stride,
            padding=(padding, 0),
            dilation=(dilation, 1),
            groups=channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation)
        # Horizontal (1 x k) depthwise convolution.
        self.conv2 = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(1, kernel_size),
            stride=stride,
            padding=(0, padding),
            dilation=(1, dilation),
            groups=channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
def dwa_conv3x3_block(channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      bias=False,
                      use_bn=True,
                      bn_eps=1e-5,
                      activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 version of the depthwise asymmetric separable convolution block.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    block = DwaConvBlock(
        channels=channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
    return block
class DABBlock(nn.Module):
    """
    DABNet specific base block: pre-activation, channel reduction, two parallel
    depthwise-asymmetric branches (plain and dilated) whose outputs are summed,
    then channel restoration with an identity residual.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for a dilated branch in the unit.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 dilation,
                 bn_eps):
        super(DABBlock, self).__init__()
        # Branches operate at half the channel count.
        mid_channels = channels // 2

        self.norm_activ1 = NormActivation(
            in_channels=channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(channels)))
        self.conv1 = conv3x3_block(
            in_channels=channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid_channels)))

        # stack=True: branch outputs are stacked on a new dim 1, then summed
        # element-wise in forward().
        self.branches = Concurrent(stack=True)
        self.branches.add_module("branches1", dwa_conv3x3_block(
            channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid_channels))))
        # Dilated branch enlarges the receptive field without downsampling.
        self.branches.add_module("branches2", dwa_conv3x3_block(
            channels=mid_channels,
            padding=dilation,
            dilation=dilation,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid_channels))))

        self.norm_activ2 = NormActivation(
            in_channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid_channels)))
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels)

    def forward(self, x):
        identity = x

        x = self.norm_activ1(x)
        x = self.conv1(x)

        x = self.branches(x)
        # Collapse the stacked branch dimension by summation.
        x = x.sum(dim=1)

        x = self.norm_activ2(x)
        x = self.conv2(x)

        x = x + identity
        return x
class DownBlock(nn.Module):
    """
    DABNet specific downsample block for the main branch. When channels expand,
    the extra channels come from a max-pooled copy of the input concatenated
    with the strided-conv output.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(DownBlock, self).__init__()
        self.expand = (in_channels < out_channels)
        # The conv only needs to produce the channels the pooled copy doesn't.
        mid_channels = out_channels - in_channels if self.expand else out_channels

        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        if self.expand:
            self.pool = nn.MaxPool2d(
                kernel_size=2,
                stride=2)
        self.norm_activ = NormActivation(
            in_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))

    def forward(self, x):
        y = self.conv(x)

        if self.expand:
            z = self.pool(x)
            y = torch.cat((y, z), dim=1)

        y = self.norm_activ(y)
        return y
class DABUnit(nn.Module):
    """
    DABNet unit: downsample, run a chain of DAB blocks, and concatenate the
    block output with the downsampled features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilations : list of int
        Dilations for blocks.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilations,
                 bn_eps):
        super(DABUnit, self).__init__()
        # Blocks run on half the output channels; the other half comes from the
        # skip concatenation in forward().
        mid_channels = out_channels // 2

        self.down = DownBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps)
        self.blocks = nn.Sequential()
        for i, dilation in enumerate(dilations):
            self.blocks.add_module("block{}".format(i + 1), DABBlock(
                channels=mid_channels,
                dilation=dilation,
                bn_eps=bn_eps))

    def forward(self, x):
        x = self.down(x)
        y = self.blocks(x)
        # Concatenate block output with the pre-block (downsampled) features.
        x = torch.cat((y, x), dim=1)
        return x
class DABStage(nn.Module):
    """
    DABNet stage. Carries two paths: x is a progressively pooled copy of the
    input image, y is the feature path; each stage fuses them by concatenation.

    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    dilations : list of int
        Dilations for blocks. An empty list skips the DAB unit entirely.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 dilations,
                 bn_eps):
        super(DABStage, self).__init__()
        self.use_unit = (len(dilations) > 0)

        # Downsample the image copy to match the feature resolution.
        self.x_down = nn.AvgPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

        if self.use_unit:
            # Unit output leaves room for the x_channels concatenated below.
            self.unit = DABUnit(
                in_channels=y_in_channels,
                out_channels=(y_out_channels - x_channels),
                dilations=dilations,
                bn_eps=bn_eps)

        self.norm_activ = NormActivation(
            in_channels=y_out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(y_out_channels)))

    def forward(self, y, x):
        x = self.x_down(x)
        if self.use_unit:
            y = self.unit(y)
        y = torch.cat((y, x), dim=1)
        y = self.norm_activ(y)
        return y, x
class DABInitBlock(nn.Module):
    """
    DABNet specific initial block: one stride-2 3x3 conv followed by two
    stride-1 3x3 convs, all with BatchNorm + PReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(DABInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class DABNet(nn.Module):
    """
    DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1907.11357.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilations for blocks.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(DABNet, self).__init__()
        assert (aux is not None)
        assert (fixed_size is not None)
        # The net downsamples by 8x, so input dimensions must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size

        # DualPathSequential threads both the feature path (y) and the pooled
        # image copy (x) through the stages; the init block takes a single input.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=0)
        self.features.add_module("init_block", DABInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps))
        y_in_channels = init_block_channels

        for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
            self.features.add_module("stage{}".format(i + 1), DABStage(
                x_channels=in_channels,
                y_in_channels=y_in_channels,
                y_out_channels=y_out_channels,
                dilations=dilations_i,
                bn_eps=bn_eps))
            y_in_channels = y_out_channels

        self.classifier = conv1x1(
            in_channels=y_in_channels,
            out_channels=num_classes)
        # Upsample logits back to the input resolution (8x).
        self.up = InterpolationBlock(
            scale_factor=8,
            align_corners=False)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases where present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Feed x twice: once as the feature path and once as the image copy.
        y = self.features(x, x)
        y = self.classifier(y)
        y = self.up(y, size=in_size)
        return y
def get_dabnet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create DABNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed DABNet architecture hyperparameters: per-stage output channels and
    # per-stage block dilations (an empty list means no DAB unit in that stage).
    net = DABNet(
        channels=[35, 131, 259],
        init_block_channels=32,
        dilations=[[], [2, 2, 2], [4, 4, 8, 8, 16, 16]],
        bn_eps=1e-3,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def dabnet_cityscapes(num_classes=19, **kwargs):
    """
    DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1907.11357.

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_dabnet(num_classes=num_classes, model_name="dabnet_cityscapes", **kwargs)
    return net
def _calc_width(net):
    """Return the total number of elements in all trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(param.size())
               for param in net.parameters()
               if param.requires_grad)
def _test():
    """Smoke test: build DABNet for Cityscapes, check parameter count and output shape."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    models = [
        dabnet_cityscapes,
    ]

    for model in models:

        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected trainable-parameter count.
        assert (model != dabnet_cityscapes or weight_count == 756643)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # y.sum().backward()
        # Output keeps the input resolution with one channel per class.
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))


if __name__ == "__main__":
    _test()
| 16,345
| 28.505415
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/cgnet.py
|
"""
CGNet for image segmentation, implemented in PyTorch.
Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
"""
__all__ = ['CGNet', 'cgnet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\
DualPathSequential, InterpolationBlock
class CGBlock(nn.Module):
    """
    CGNet block: a local/surround context pair of depthwise convolutions whose
    concatenated output is refined by an SE block. In downsample mode the block
    halves resolution and has no residual; otherwise it is residual.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    down : bool
        Whether to downsample.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation,
                 se_reduction,
                 down,
                 bn_eps):
        super(CGBlock, self).__init__()
        self.down = down
        # The two depthwise branches double the channel count (mid1 -> mid2).
        if self.down:
            mid1_channels = out_channels
            mid2_channels = 2 * out_channels
        else:
            mid1_channels = out_channels // 2
            mid2_channels = out_channels

        if self.down:
            # Strided 3x3 conv performs the spatial downsampling.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=2,
                bn_eps=bn_eps,
                activation=(lambda: nn.PReLU(out_channels)))
        else:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid1_channels,
                bn_eps=bn_eps,
                activation=(lambda: nn.PReLU(mid1_channels)))

        # Local (plain) and surrounding-context (dilated) depthwise branches,
        # concatenated channel-wise by Concurrent.
        self.branches = Concurrent()
        self.branches.add_module("branches1", depthwise_conv3x3(channels=mid1_channels))
        self.branches.add_module("branches2", depthwise_conv3x3(
            channels=mid1_channels,
            padding=dilation,
            dilation=dilation))

        self.norm_activ = NormActivation(
            in_channels=mid2_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(mid2_channels)))

        if self.down:
            # Project the doubled channels back down to out_channels.
            self.conv2 = conv1x1(
                in_channels=mid2_channels,
                out_channels=out_channels)

        self.se = SEBlock(
            channels=out_channels,
            reduction=se_reduction,
            use_conv=False)

    def forward(self, x):
        if not self.down:
            identity = x
        x = self.conv1(x)
        x = self.branches(x)
        x = self.norm_activ(x)
        if self.down:
            x = self.conv2(x)
        x = self.se(x)
        if not self.down:
            x += identity
        return x
class CGUnit(nn.Module):
    """
    CGNet unit: a downsampling CG block followed by residual CG blocks, with
    the block output concatenated back onto the downsampled features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    layers : int
        Number of layers.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_eps):
        super(CGUnit, self).__init__()
        # Blocks run at half the output width; the skip concat supplies the rest.
        mid_channels = out_channels // 2

        self.down = CGBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            dilation=dilation,
            se_reduction=se_reduction,
            down=True,
            bn_eps=bn_eps)
        self.blocks = nn.Sequential()
        for i in range(layers - 1):
            self.blocks.add_module("block{}".format(i + 1), CGBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                dilation=dilation,
                se_reduction=se_reduction,
                down=False,
                bn_eps=bn_eps))

    def forward(self, x):
        x = self.down(x)
        y = self.blocks(x)
        x = torch.cat((y, x), dim=1)  # NB: This differs from the original implementation.
        return x
class CGStage(nn.Module):
    """
    CGNet stage. Carries two paths: x is a progressively pooled copy of the
    input image (optionally concatenated with features), y is the feature path.

    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x. 0 disables the x-branch fusion.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    layers : int
        Number of layers in the unit. 0 skips the unit entirely.
    dilation : int
        Dilation for blocks.
    se_reduction : int
        SE-block reduction value for blocks.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_eps):
        super(CGStage, self).__init__()
        self.use_x = (x_channels > 0)
        self.use_unit = (layers > 0)

        if self.use_x:
            # Downsample the image copy to match the feature resolution.
            self.x_down = nn.AvgPool2d(
                kernel_size=3,
                stride=2,
                padding=1)

        if self.use_unit:
            # Unit output leaves room for the x_channels concatenated below.
            self.unit = CGUnit(
                in_channels=y_in_channels,
                out_channels=(y_out_channels - x_channels),
                layers=layers,
                dilation=dilation,
                se_reduction=se_reduction,
                bn_eps=bn_eps)

        self.norm_activ = NormActivation(
            in_channels=y_out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(y_out_channels)))

    def forward(self, y, x=None):
        if self.use_unit:
            y = self.unit(y)
        if self.use_x:
            x = self.x_down(x)
            y = torch.cat((y, x), dim=1)
        y = self.norm_activ(y)
        return y, x
class CGInitBlock(nn.Module):
    """
    CGNet specific initial block: one stride-2 3x3 conv followed by two
    stride-1 3x3 convs, all with BatchNorm + PReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(CGInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(out_channels)))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class CGNet(nn.Module):
    """
    CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
    https://arxiv.org/abs/1811.08201.

    Parameters:
    ----------
    layers : list of int
        Number of layers for each unit.
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of int
        Dilations for each unit.
    se_reductions : list of int
        SE-block reduction value for each unit.
    cut_x : list of int
        Whether to concatenate with x-branch for each unit.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 layers,
                 channels,
                 init_block_channels,
                 dilations,
                 se_reductions,
                 cut_x,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(CGNet, self).__init__()
        assert (aux is not None)
        assert (fixed_size is not None)
        # The net downsamples by 8x, so input dimensions must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size

        # DualPathSequential threads both the feature path (y) and the pooled
        # image copy (x) through the stages; the init block takes a single input.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=0)
        self.features.add_module("init_block", CGInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps))
        y_in_channels = init_block_channels

        for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)):
            self.features.add_module("stage{}".format(i + 1), CGStage(
                # cut_x[i] == 1 enables fusion with the pooled image copy.
                x_channels=in_channels if cut_x[i] == 1 else 0,
                y_in_channels=y_in_channels,
                y_out_channels=y_out_channels,
                layers=layers_i,
                dilation=dilations[i],
                se_reduction=se_reductions[i],
                bn_eps=bn_eps))
            y_in_channels = y_out_channels

        self.classifier = conv1x1(
            in_channels=y_in_channels,
            out_channels=num_classes)
        # Upsample logits back to the input resolution (8x).
        self.up = InterpolationBlock(
            scale_factor=8,
            align_corners=False)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases where present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Feed x twice: once as the feature path and once as the image copy.
        y = self.features(x, x)
        y = self.classifier(y)
        y = self.up(y, size=in_size)
        return y
def get_cgnet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create CGNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed CGNet (M3N21) configuration.
    net = CGNet(
        layers=[0, 3, 21],
        channels=[35, 131, 256],
        init_block_channels=32,
        dilations=[0, 2, 4],
        se_reductions=[0, 8, 16],
        cut_x=[1, 1, 0],
        bn_eps=1e-3,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def cgnet_cityscapes(num_classes=19, **kwargs):
    """
    CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
    https://arxiv.org/abs/1811.08201.

    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_cgnet(
        num_classes=num_classes,
        model_name="cgnet_cityscapes",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: parameter count and output spatial size for CGNet."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    for model in (cgnet_cityscapes,):
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != cgnet_cityscapes or weight_count == 496306)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))


if __name__ == "__main__":
    _test()
| 13,575
| 28.577342
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/wrn1bit_cifar.py
|
"""
WRN-1bit for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Training wide residual networks for deployment using a single bit for each weight,'
https://arxiv.org/abs/1802.08530.
"""
__all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn',
'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn']
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class Binarize(torch.autograd.Function):
    """
    Fake sign op for 1-bit weights: the forward pass scales sign(x) by the
    He-style factor sqrt(2 / fan_in); the backward pass is a straight-through
    estimator that forwards the gradient unchanged.
    """
    @staticmethod
    def forward(ctx, x):
        fan_in = x.shape[1] * x.shape[2] * x.shape[3]
        return x.sign() * math.sqrt(2.0 / fan_in)

    @staticmethod
    def backward(ctx, dy):
        # Straight-through: pretend the op was the identity.
        return dy
class Conv2d1bit(nn.Conv2d):
    """
    Standard convolution layer whose weight (and bias) may be binarized on the fly.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding=1,
                 dilation=1,
                 groups=1,
                 bias=False,
                 binarized=False):
        super(Conv2d1bit, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.binarized = binarized

    def forward(self, input):
        if self.binarized:
            # Binarize parameters at every forward pass (fake quantization).
            weight = Binarize.apply(self.weight)
            bias = None if self.bias is None else Binarize.apply(self.bias)
        else:
            weight = self.weight
            bias = self.bias
        return F.conv2d(
            input=input,
            weight=weight,
            bias=bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)
def conv1x1_1bit(in_channels,
                 out_channels,
                 stride=1,
                 groups=1,
                 bias=False,
                 binarized=False):
    """
    Convolution 1x1 layer with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    return Conv2d1bit(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        # Fix: padding must be passed explicitly — Conv2d1bit defaults to padding=1,
        # which for a 1x1 kernel would inflate the output by a 2-pixel zero border.
        padding=0,
        groups=groups,
        bias=bias,
        binarized=binarized)
def conv3x3_1bit(in_channels,
                 out_channels,
                 stride=1,
                 padding=1,
                 dilation=1,
                 groups=1,
                 bias=False,
                 binarized=False):
    """
    Convolution 3x3 layer with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    return Conv2d1bit(
        in_channels, out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        binarized=binarized)
class ConvBlock1bit(nn.Module):
    """
    Conv -> BatchNorm (-> ReLU) block whose convolution may be binarized.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    activate : bool, default True
        Whether to apply the ReLU activation.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 bn_affine=True,
                 activate=True,
                 binarized=False):
        super(ConvBlock1bit, self).__init__()
        self.activate = activate
        self.conv = Conv2d1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            binarized=binarized)
        self.bn = nn.BatchNorm2d(
            num_features=out_channels,
            affine=bn_affine)
        if activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.bn(self.conv(x))
        return self.activ(x) if self.activate else x
def conv1x1_block_1bit(in_channels,
                       out_channels,
                       stride=1,
                       padding=0,
                       groups=1,
                       bias=False,
                       bn_affine=True,
                       activate=True,
                       binarized=False):
    """
    1x1 version of the standard convolution block with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    return ConvBlock1bit(
        in_channels, out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        bn_affine=bn_affine,
        activate=activate,
        binarized=binarized)
class PreConvBlock1bit(nn.Module):
    """
    Pre-activation block: BatchNorm (-> ReLU) -> conv, with optional weight
    binarization. Can also return the pre-activation tensor (PreResNet style).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 bn_affine=True,
                 return_preact=False,
                 activate=True,
                 binarized=False):
        super(PreConvBlock1bit, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(
            num_features=in_channels,
            affine=bn_affine)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = Conv2d1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            binarized=binarized)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        # Capture the activated tensor before the convolution when requested.
        pre_activ = x if self.return_preact else None
        x = self.conv(x)
        if self.return_preact:
            return x, pre_activ
        return x
def pre_conv3x3_block_1bit(in_channels,
                           out_channels,
                           stride=1,
                           padding=1,
                           dilation=1,
                           bn_affine=True,
                           return_preact=False,
                           activate=True,
                           binarized=False):
    """
    3x3 version of the pre-activated convolution block with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    return PreConvBlock1bit(
        in_channels, out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bn_affine=bn_affine,
        return_preact=return_preact,
        activate=activate,
        binarized=binarized)
class PreResBlock1bit(nn.Module):
    """
    Residual-path body of a PreResNet unit: two pre-activated 3x3 convolutions
    (with binarization support).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 binarized=False):
        super(PreResBlock1bit, self).__init__()
        self.conv1 = pre_conv3x3_block_1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bn_affine=False,
            return_preact=False,
            binarized=binarized)
        self.conv2 = pre_conv3x3_block_1bit(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_affine=False,
            binarized=binarized)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class PreResUnit1bit(nn.Module):
    """
    PreResNet unit with residual connection (with binarization).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 binarized=False):
        super(PreResUnit1bit, self).__init__()
        self.resize_identity = (stride != 1)
        self.body = PreResBlock1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            binarized=binarized)
        if self.resize_identity:
            self.identity_pool = nn.AvgPool2d(
                kernel_size=3,
                stride=2,
                padding=1)

    def forward(self, x):
        identity = x
        out = self.body(x)
        if self.resize_identity:
            # Parameter-free shortcut: downsample spatially, then double the
            # channel count by concatenating zeros.
            identity = self.identity_pool(identity)
            identity = torch.cat((identity, torch.zeros_like(identity)), dim=1)
        return out + identity
class PreResActivation(nn.Module):
    """
    PreResNet pure pre-activation block (BatchNorm -> ReLU) without a
    convolution layer; used on its own as the final feature block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    """
    def __init__(self,
                 in_channels,
                 bn_affine=True):
        super(PreResActivation, self).__init__()
        self.bn = nn.BatchNorm2d(
            num_features=in_channels,
            affine=bn_affine)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class CIFARWRN1bit(nn.Module):
    """
    WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    binarized : bool, default True
        Whether to use binarization.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 binarized=True,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARWRN1bit, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_1bit(
            in_channels=in_channels,
            out_channels=init_block_channels,
            binarized=binarized))
        curr_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for j, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), PreResUnit1bit(
                    in_channels=curr_channels,
                    out_channels=unit_channels,
                    stride=unit_stride,
                    binarized=binarized))
                curr_channels = unit_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("post_activ", PreResActivation(
            in_channels=curr_channels,
            bn_affine=False))

        # Classifier: 1x1 conv over the feature map, then global average pooling.
        self.output = nn.Sequential()
        self.output.add_module("final_conv", conv1x1_block_1bit(
            in_channels=curr_channels,
            out_channels=num_classes,
            activate=False,
            binarized=binarized))
        self.output.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        return x.view(x.size(0), -1)
def get_wrn1bit_cifar(num_classes,
                      blocks,
                      width_factor,
                      binarized=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".torch", "models"),
                      **kwargs):
    """
    Create WRN-1bit model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    binarized : bool, default True
        Whether to use binarization.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Three stages with equal depth: 2 units per residual block, 3 stages.
    assert ((blocks - 2) % 6 == 0)
    units_per_stage = (blocks - 2) // 6
    base_channels = [16, 32, 64]
    channels = [[base * width_factor] * units_per_stage for base in base_channels]

    net = CIFARWRN1bit(
        channels=channels,
        init_block_channels=(16 * width_factor),
        binarized=binarized,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def wrn20_10_1bit_cifar10(num_classes=10, **kwargs):
    """
    WRN-20-10-1bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar10",
        **kwargs)
def wrn20_10_1bit_cifar100(num_classes=100, **kwargs):
    """
    WRN-20-10-1bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar100",
        **kwargs)
def wrn20_10_1bit_svhn(num_classes=10, **kwargs):
    """
    WRN-20-10-1bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_svhn",
        **kwargs)
def wrn20_10_32bit_cifar10(num_classes=10, **kwargs):
    """
    WRN-20-10-32bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar10",
        **kwargs)
def wrn20_10_32bit_cifar100(num_classes=100, **kwargs):
    """
    WRN-20-10-32bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar100",
        **kwargs)
def wrn20_10_32bit_svhn(num_classes=10, **kwargs):
    """
    WRN-20-10-32bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: parameter counts, forward and backward passes for all variants."""
    import torch

    pretrained = False
    expected_counts = {
        "wrn20_10_1bit_cifar10": 26737140,
        "wrn20_10_1bit_cifar100": 26794920,
        "wrn20_10_1bit_svhn": 26737140,
        "wrn20_10_32bit_cifar10": 26737140,
        "wrn20_10_32bit_cifar100": 26794920,
        "wrn20_10_32bit_svhn": 26737140,
    }

    models = [
        (wrn20_10_1bit_cifar10, 10),
        (wrn20_10_1bit_cifar100, 100),
        (wrn20_10_1bit_svhn, 10),
        (wrn20_10_32bit_cifar10, 10),
        (wrn20_10_32bit_cifar100, 100),
        (wrn20_10_32bit_svhn, 10),
    ]

    for model, num_classes in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model.__name__])

        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))


if __name__ == "__main__":
    _test()
| 24,899
| 30.558935
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/condensenet.py
|
"""
CondenseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
"""
__all__ = ['CondenseNet', 'condensenet74_c4_g4', 'condensenet74_c8_g8']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
from .common import ChannelShuffle
class CondenseSimpleConv(nn.Module):
    """
    CondenseNet specific simple convolution block (BN -> ReLU -> grouped conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups):
        super(CondenseSimpleConv, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False)

    def forward(self, x):
        return self.conv(self.activ(self.bn(x)))
def condense_simple_conv3x3(in_channels,
                            out_channels,
                            groups):
    """
    3x3 version of the CondenseNet specific simple convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    """
    return CondenseSimpleConv(
        in_channels, out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        groups=groups)
class CondenseComplexConv(nn.Module):
    """
    CondenseNet specific complex convolution block: learned-index channel
    selection -> BN -> ReLU -> grouped conv -> channel shuffle.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups):
        super(CondenseComplexConv, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False)
        self.c_shuffle = ChannelShuffle(
            channels=out_channels,
            groups=groups)
        # Channel-selection indices (filled from a converted pretrained model).
        self.register_buffer('index', torch.zeros(in_channels, dtype=torch.long))

    def forward(self, x):
        x = torch.index_select(x, dim=1, index=Variable(self.index))
        x = self.conv(self.activ(self.bn(x)))
        return self.c_shuffle(x)
def condense_complex_conv1x1(in_channels,
                             out_channels,
                             groups):
    """
    1x1 version of the CondenseNet specific complex convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    """
    return CondenseComplexConv(
        in_channels, out_channels,
        kernel_size=1,
        stride=1,
        padding=0,
        groups=groups)
class CondenseUnit(nn.Module):
    """
    CondenseNet unit: 1x1 bottleneck + 3x3 grouped conv, densely concatenated
    with the unit input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups):
        super(CondenseUnit, self).__init__()
        bottleneck_size = 4
        # Only the channel increment is newly computed; input channels pass through.
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bottleneck_size
        self.conv1 = condense_complex_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=groups)
        self.conv2 = condense_simple_conv3x3(
            in_channels=mid_channels,
            out_channels=inc_channels,
            groups=groups)

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        return torch.cat((x, out), dim=1)
class TransitionBlock(nn.Module):
    """
    CondenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
    first unit of each stage. It simply halves the spatial resolution with a 2x2
    average pooling and has no parameters (the original docstring wrongly
    documented `in_channels`/`out_channels` arguments that do not exist).
    """
    def __init__(self):
        super(TransitionBlock, self).__init__()
        self.pool = nn.AvgPool2d(
            kernel_size=2,
            stride=2,
            padding=0)

    def forward(self, x):
        x = self.pool(x)
        return x
class CondenseInitBlock(nn.Module):
    """
    CondenseNet specific initial block: a single stride-2 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(CondenseInitBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

    def forward(self, x):
        return self.conv(x)
class PostActivation(nn.Module):
    """
    CondenseNet final block: BatchNorm -> ReLU, playing the same
    post-activation role as in PreResNet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(PostActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class CondenseLinear(nn.Module):
    """
    CondenseNet specific linear block: selects a learned subset of the input
    features (via an index buffer) before the fully-connected layer.

    Parameters:
    ----------
    in_features : int
        Number of input channels.
    out_features : int
        Number of output channels.
    drop_rate : float
        Fraction of input channels for drop.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 drop_rate=0.5):
        super(CondenseLinear, self).__init__()
        kept_features = int(in_features * drop_rate)
        self.linear = nn.Linear(
            in_features=kept_features,
            out_features=out_features)
        # Feature-selection indices (filled from a converted pretrained model).
        self.register_buffer('index', torch.zeros(kept_features, dtype=torch.long))

    def forward(self, x):
        x = torch.index_select(x, dim=1, index=Variable(self.index))
        return self.linear(x)
class CondenseNet(nn.Module):
    """
    CondenseNet model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
    https://arxiv.org/abs/1711.09224.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(CondenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", CondenseInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        curr_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            if i != 0:
                # Every stage after the first starts by halving the resolution.
                stage.add_module("trans{}".format(i + 1), TransitionBlock())
            for j, unit_channels in enumerate(stage_channels):
                stage.add_module("unit{}".format(j + 1), CondenseUnit(
                    in_channels=curr_channels,
                    out_channels=unit_channels,
                    groups=groups))
                curr_channels = unit_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("post_activ", PostActivation(in_channels=curr_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = CondenseLinear(
            in_features=curr_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_condensenet(num_layers,
                    groups=4,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create CondenseNet (converted) model with specific parameters.

    Parameters:
    ----------
    num_layers : int
        Number of layers.
    groups : int, default 4
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `num_layers` is not a supported configuration, or if `pretrained`
        is set without a valid `model_name`.
    """
    if num_layers == 74:
        init_block_channels = 16
        layers = [4, 6, 8, 10, 8]
        growth_rates = [8, 16, 32, 64, 128]
    else:
        raise ValueError("Unsupported CondenseNet version with number of layers {}".format(num_layers))

    # Dense connectivity: within a stage each unit adds its stage's growth rate
    # to the running channel count, which carries over between stages.
    # (Replaces an equivalent but opaque nested functools.reduce expression.)
    channels = []
    curr_channels = init_block_channels
    for num_units, growth_rate in zip(layers, growth_rates):
        stage_channels = []
        for _ in range(num_units):
            curr_channels += growth_rate
            stage_channels.append(curr_channels)
        channels.append(stage_channels)

    net = CondenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def condensenet74_c4_g4(**kwargs):
    """
    CondenseNet-74 (C=G=4) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group
    Convolutions,' https://arxiv.org/abs/1711.09224.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_condensenet(
        num_layers=74,
        groups=4,
        model_name="condensenet74_c4_g4",
        **kwargs)
def condensenet74_c8_g8(**kwargs):
    """
    CondenseNet-74 (C=G=8) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group
    Convolutions,' https://arxiv.org/abs/1711.09224.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_condensenet(
        num_layers=74,
        groups=8,
        model_name="condensenet74_c8_g8",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build each model variant, verify its trainable-parameter
    # count against the known reference value, then run one forward/backward
    # pass on a single ImageNet-sized input.
    import torch
    pretrained = False
    models = [
        condensenet74_c4_g4,
        condensenet74_c8_g8,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each configuration.
        assert (model != condensenet74_c4_g4 or weight_count == 4773944)
        assert (model != condensenet74_c8_g8 or weight_count == 2935416)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        # Backward pass checks the graph is differentiable end to end.
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 14,732
| 28.059172
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/fbnet.py
|
"""
FBNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
https://arxiv.org/abs/1812.03443.
"""
__all__ = ['FBNet', 'fbnet_cb']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block
class FBNetUnit(nn.Module):
    """
    FBNet unit: an inverted-residual block with a 1x1 expansion convolution,
    a depthwise convolution, and a linear 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_eps : float
        Small float added to variance in Batch norm.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int
        Expansion factor for each unit.
    activation : str, default 'relu'
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bn_eps,
                 use_kernel3,
                 exp_factor,
                 activation="relu"):
        super(FBNetUnit, self).__init__()
        assert (exp_factor >= 1)
        # Identity shortcut only when input/output shapes match.
        self.residual = (in_channels == out_channels) and (stride == 1)
        # The expansion conv is always present in this architecture.
        self.use_exp_conv = True
        mid_channels = exp_factor * in_channels
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_eps=bn_eps,
                activation=activation)
        # Depthwise conv: kernel size is a searchable choice (3x3 vs 5x5).
        dw_conv_block = dwconv3x3_block if use_kernel3 else dwconv5x5_block
        self.conv1 = dw_conv_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            bn_eps=bn_eps,
            activation=activation)
        # Linear (no activation) projection back down.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        out = self.exp_conv(x) if self.use_exp_conv else x
        out = self.conv1(out)
        out = self.conv2(out)
        if self.residual:
            out = out + identity
        return out
class FBNetInitBlock(nn.Module):
    """
    FBNet specific initial block: a stride-2 3x3 conv followed by one
    non-expanding FBNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(FBNetInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            bn_eps=bn_eps)
        # exp_factor=1: the unit does not expand channels here.
        self.conv2 = FBNetUnit(
            in_channels=out_channels,
            out_channels=out_channels,
            stride=1,
            bn_eps=bn_eps,
            use_kernel3=True,
            exp_factor=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class FBNet(nn.Module):
    """
    FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
    https://arxiv.org/abs/1812.03443.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(FBNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: module names and registration order ("init_block", "stageN",
        # "unitN", ...) define the state-dict keys for pretrained weights.
        self.features = nn.Sequential()
        self.features.add_module("init_block", FBNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage downsamples (stride 2).
                stride = 2 if (j == 0) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                stage.add_module("unit{}".format(j + 1), FBNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bn_eps=bn_eps,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps))
        in_channels = final_block_channels
        # 7x7 average pool collapses the final feature map for 224x224 inputs.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # He-uniform init for conv weights; zero for conv biases if present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        # Features -> flatten -> linear classifier; returns class logits.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_fbnet(version,
              bn_eps=1e-5,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create FBNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of FBNet ('c' is currently the only supported value).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if version == "c":
        # Per-stage configuration of FBNet-C: output channels, kernel-size
        # flags (1 -> 3x3, 0 -> 5x5), and expansion factors for every unit.
        init_block_channels = 16
        final_block_channels = 1984
        channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]]
        kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]
        exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]]
    else:
        raise ValueError("Unsupported FBNet version {}".format(version))
    net = FBNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        bn_eps=bn_eps,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def fbnet_cb(**kwargs):
    """
    FBNet-Cb model (bn_eps=1e-3) from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural
    Architecture Search,' https://arxiv.org/abs/1812.03443.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_fbnet(
        version="c",
        bn_eps=1e-3,
        model_name="fbnet_cb",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build the model, verify its trainable-parameter count
    # against the known reference value, then run one forward/backward pass.
    import torch
    pretrained = False
    models = [
        fbnet_cb,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter count for FBNet-Cb.
        assert (model != fbnet_cb or weight_count == 5572200)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 9,969
| 30.352201
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/visemenet.py
|
"""
VisemeNet for speech-driven facial animation, implemented in PyTorch.
Original paper: 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488.
"""
__all__ = ['VisemeNet', 'visemenet20']
import os
import torch
import torch.nn as nn
from .common import DenseBlock
class VisemeDenseBranch(nn.Module):
    """
    VisemeNet dense branch: a stack of dense (FC+BN) blocks followed by a
    plain linear head. Returns both the head output and the last hidden
    features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(VisemeDenseBranch, self).__init__()
        self.branch = nn.Sequential()
        block_idx = 0
        for out_channels in out_channels_list[:-1]:
            block_idx += 1
            self.branch.add_module("block{}".format(block_idx), DenseBlock(
                in_features=in_channels,
                out_features=out_channels,
                bias=True,
                use_bn=True))
            in_channels = out_channels
        # Final linear head (no BN/activation).
        self.final_fc = nn.Linear(
            in_features=in_channels,
            out_features=out_channels_list[-1])

    def forward(self, x):
        hidden = self.branch(x)
        return self.final_fc(hidden), hidden
class VisemeRnnBranch(nn.Module):
    """
    VisemeNet RNN branch: an LSTM whose last-step output feeds a dense branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    rnn_num_layers : int
        Number of RNN layers.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 rnn_num_layers,
                 dropout_rate):
        super(VisemeRnnBranch, self).__init__()
        self.rnn = nn.LSTM(
            input_size=in_channels,
            hidden_size=out_channels_list[0],
            num_layers=rnn_num_layers,
            dropout=dropout_rate)
        self.fc_branch = VisemeDenseBranch(
            in_channels=out_channels_list[0],
            out_channels_list=out_channels_list[1:])

    def forward(self, x):
        rnn_out, _ = self.rnn(x)
        # Take the last step along dim 1.
        last_step = rnn_out[:, -1, :]
        y, _ = self.fc_branch(last_step)
        return y
class VisemeNet(nn.Module):
    """
    VisemeNet model from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,'
    https://arxiv.org/abs/1805.09488.

    Parameters:
    ----------
    audio_features : int, default 195
        Number of audio features (characters/sounds).
    audio_window_size : int, default 8
        Size of audio window (for time related audio features).
    stage2_window_size : int, default 64
        Size of window for stage #2.
    num_face_ids : int, default 76
        Number of face IDs.
    num_landmarks : int, default 76
        Number of landmarks.
    num_phonemes : int, default 21
        Number of phonemes.
    num_visemes : int, default 20
        Number of visemes.
    dropout_rate : float, default 0.5
        Dropout rate for RNNs.
    """
    def __init__(self,
                 audio_features=195,
                 audio_window_size=8,
                 stage2_window_size=64,
                 num_face_ids=76,
                 num_landmarks=76,
                 num_phonemes=21,
                 num_visemes=20,
                 dropout_rate=0.5):
        super(VisemeNet, self).__init__()
        stage1_rnn_hidden_size = 256
        stage1_fc_mid_channels = 256
        # Stage-2 input width after windowed reshaping of the concatenated
        # (audio, landmarks, stage-1 hidden) features.
        stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \
            stage2_window_size // audio_window_size
        self.audio_window_size = audio_window_size
        self.stage2_window_size = stage2_window_size
        self.stage1_rnn = nn.LSTM(
            input_size=audio_features,
            hidden_size=stage1_rnn_hidden_size,
            num_layers=3,
            dropout=dropout_rate)
        # Landmark and phoneme heads share the same input (RNN state + face ID).
        self.lm_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_landmarks])
        self.ph_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_phonemes])
        self.cls_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, num_visemes],
            rnn_num_layers=1,
            dropout_rate=dropout_rate)
        self.reg_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, 100, num_visemes],
            rnn_num_layers=3,
            dropout_rate=dropout_rate)
        self.jali_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[128, 200, 2],
            rnn_num_layers=3,
            dropout_rate=dropout_rate)
    def forward(self, x, pid):
        # Stage 1: encode the audio window; take the last step along dim 1.
        # NOTE(review): nn.LSTM defaults to seq-first input, yet `y[:, -1, :]`
        # indexes dim 1 as time — looks like batch-first data is assumed;
        # confirm the intended input layout against the caller.
        y, _ = self.stage1_rnn(x)
        y = y[:, -1, :]
        y = torch.cat((y, pid), dim=1)
        lm, _ = self.lm_branch(y)
        # Landmarks are predicted as residuals on top of the face-ID vector.
        lm += pid
        # `ph` (phoneme logits) is computed but only the hidden features
        # `ph1` feed stage 2.
        ph, ph1 = self.ph_branch(y)
        z = torch.cat((lm, ph1), dim=1)
        # Append the center frame of the raw audio window.
        z2 = torch.cat((z, x[:, self.audio_window_size // 2, :]), dim=1)
        n_net2_input = z2.shape[1]
        # Zero-pad at the front so early frames still get a full stage-2 window.
        z2 = torch.cat((torch.zeros((self.stage2_window_size // 2, n_net2_input)), z2), dim=0)
        # Slide a stage-2 window over time and fold each window into
        # `audio_window_size` steps of widened features.
        z = torch.stack(
            [z2[i:i + self.stage2_window_size].reshape(
                (self.audio_window_size, n_net2_input * self.stage2_window_size // self.audio_window_size))
                for i in range(z2.shape[0] - self.stage2_window_size)],
            dim=0)
        cls = self.cls_branch(z)
        reg = self.reg_branch(z)
        jali = self.jali_branch(z)
        return cls, reg, jali
def get_visemenet(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create VisemeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = VisemeNet(**kwargs)
    if pretrained:
        # An empty/None name cannot identify a weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def visemenet20(**kwargs):
    """
    VisemeNet model for 20 visemes (without co-articulation rules) from 'VisemeNet: Audio-Driven Animator-Centric
    Speech Animation,' https://arxiv.org/abs/1805.09488.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_visemenet(
        model_name="visemenet20",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build the model, verify its trainable-parameter count,
    # then run one forward pass on synthetic audio features and face IDs.
    import torch
    pretrained = False
    models = [
        visemenet20,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != visemenet20 or weight_count == 14574303)
        batch = 34
        audio_window_size = 8
        audio_features = 195
        num_face_ids = 76
        num_visemes = 20
        x = torch.randn(batch, audio_window_size, audio_features)
        pid = torch.full(size=(batch, num_face_ids), fill_value=3)
        y1, y2, y3 = net(x, pid)
        # Class and regression heads share batch size and viseme width;
        # the JALI head predicts 2 values.
        assert (y1.shape[0] == y2.shape[0] == y3.shape[0])
        assert (y1.shape[1] == y2.shape[1] == num_visemes)
        assert (y3.shape[1] == 2)
if __name__ == "__main__":
    _test()
| 8,396
| 30.215613
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/fractalnet_cifar.py
|
"""
FractalNet for CIFAR, implemented in PyTorch.
Original paper: 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648.
"""
__all__ = ['CIFARFractalNet', 'fractalnet_cifar10', 'fractalnet_cifar100']
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import ParametricSequential
class DropConvBlock(nn.Module):
    """
    Convolution block with Batch normalization, ReLU activation, and Dropout layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_prob : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 bias=False,
                 dropout_prob=0.0):
        super(DropConvBlock, self).__init__()
        # Dropout layer is only instantiated when it would have an effect.
        self.use_dropout = (dropout_prob != 0.0)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.activ = nn.ReLU(inplace=True)
        if self.use_dropout:
            self.dropout = nn.Dropout2d(p=dropout_prob)
    def forward(self, x):
        # Conv -> BN -> ReLU -> (optional) channel-wise dropout.
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x
def drop_conv3x3_block(in_channels,
                       out_channels,
                       stride=1,
                       padding=1,
                       bias=False,
                       dropout_prob=0.0):
    """
    3x3 version of the convolution block with dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_prob : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    return DropConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        bias=bias,
        dropout_prob=dropout_prob)
class FractalBlock(nn.Module):
    """
    FractalNet block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    num_columns : int
        Number of columns in each block.
    loc_drop_prob : float
        Local drop path probability.
    dropout_prob : float
        Probability of dropout.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_columns,
                 loc_drop_prob,
                 dropout_prob):
        super(FractalBlock, self).__init__()
        assert (num_columns >= 1)
        self.num_columns = num_columns
        self.loc_drop_prob = loc_drop_prob
        self.blocks = nn.Sequential()
        # The deepest column has 2^(num_columns-1) conv levels.
        depth = 2 ** (num_columns - 1)
        for i in range(depth):
            level_block_i = nn.Sequential()
            for j in range(self.num_columns):
                # Column j contributes a conv at level i iff (i+1) is a
                # multiple of 2^j; column 0 fires at every level.
                column_step_j = 2 ** j
                if (i + 1) % column_step_j == 0:
                    # The very first conv of a column consumes the block
                    # input; subsequent convs consume joined outputs.
                    in_channels_ij = in_channels if (i + 1 == column_step_j) else out_channels
                    level_block_i.add_module("subblock{}".format(j + 1), drop_conv3x3_block(
                        in_channels=in_channels_ij,
                        out_channels=out_channels,
                        dropout_prob=dropout_prob))
            self.blocks.add_module("block{}".format(i + 1), level_block_i)
    @staticmethod
    def calc_drop_mask(batch_size,
                       glob_num_columns,
                       curr_num_columns,
                       max_num_columns,
                       loc_drop_prob):
        """
        Calculate drop path mask.

        Parameters:
        ----------
        batch_size : int
            Size of batch.
        glob_num_columns : int
            Number of columns in global drop path mask.
        curr_num_columns : int
            Number of active columns in the current level of block.
        max_num_columns : int
            Number of columns for all network.
        loc_drop_prob : float
            Local drop path probability.

        Returns:
        -------
        Tensor
            Resulted mask.
        """
        # Global part: each sample keeps exactly the one column sampled for it
        # (when that column is active at this level).
        glob_batch_size = glob_num_columns.shape[0]
        glob_drop_mask = np.zeros((curr_num_columns, glob_batch_size), dtype=np.float32)
        glob_drop_num_columns = glob_num_columns - (max_num_columns - curr_num_columns)
        glob_drop_indices = np.where(glob_drop_num_columns >= 0)[0]
        glob_drop_mask[glob_drop_num_columns[glob_drop_indices], glob_drop_indices] = 1.0
        # Local part: each column is kept independently with prob 1-loc_drop_prob.
        loc_batch_size = batch_size - glob_batch_size
        loc_drop_mask = np.random.binomial(
            n=1,
            p=(1.0 - loc_drop_prob),
            size=(curr_num_columns, loc_batch_size)).astype(np.float32)
        # Samples where every column was dropped get one random column revived.
        alive_count = loc_drop_mask.sum(axis=0)
        dead_indices = np.where(alive_count == 0.0)[0]
        loc_drop_mask[np.random.randint(0, curr_num_columns, size=dead_indices.shape), dead_indices] = 1.0
        drop_mask = np.concatenate((glob_drop_mask, loc_drop_mask), axis=1)
        return torch.from_numpy(drop_mask)
    @staticmethod
    def join_outs(raw_outs,
                  glob_num_columns,
                  num_columns,
                  loc_drop_prob,
                  training):
        """
        Join outputs for current level of block.

        Parameters:
        ----------
        raw_outs : list of Tensor
            Current outputs from active columns.
        glob_num_columns : int
            Number of columns in global drop path mask.
        num_columns : int
            Number of columns for all network.
        loc_drop_prob : float
            Local drop path probability.
        training : bool
            Whether training mode for network.

        Returns:
        -------
        Tensor
            Joined output.
        """
        curr_num_columns = len(raw_outs)
        out = torch.stack(raw_outs, dim=0)
        assert (out.size(0) == curr_num_columns)
        if training:
            batch_size = out.size(1)
            batch_mask = FractalBlock.calc_drop_mask(
                batch_size=batch_size,
                glob_num_columns=glob_num_columns,
                curr_num_columns=curr_num_columns,
                max_num_columns=num_columns,
                loc_drop_prob=loc_drop_prob)
            batch_mask = batch_mask.to(out.device)
            assert (batch_mask.size(0) == curr_num_columns)
            assert (batch_mask.size(1) == batch_size)
            # Broadcast mask over (channels, height, width).
            batch_mask = batch_mask.unsqueeze(2).unsqueeze(3).unsqueeze(4)
            masked_out = out * batch_mask
            # Average over surviving columns only; guard against div by zero
            # (calc_drop_mask already revives one column per dead sample).
            num_alive = batch_mask.sum(dim=0)
            num_alive[num_alive == 0.0] = 1.0
            out = masked_out.sum(dim=0) / num_alive
        else:
            # Inference: plain mean over all active columns.
            out = out.mean(dim=0)
        return out
    def forward(self, x, glob_num_columns):
        # outs[j] holds the current activation of column j.
        outs = [x] * self.num_columns
        for level_block_i in self.blocks._modules.values():
            outs_i = []
            for j, block_ij in enumerate(level_block_i._modules.values()):
                input_i = outs[j]
                outs_i.append(block_ij(input_i))
            # Columns are joined shallowest-first (hence the reversal).
            joined_out = FractalBlock.join_outs(
                raw_outs=outs_i[::-1],
                glob_num_columns=glob_num_columns,
                num_columns=self.num_columns,
                loc_drop_prob=self.loc_drop_prob,
                training=self.training)
            # Every column active at this level restarts from the joined value.
            len_level_block_i = len(level_block_i._modules.values())
            for j in range(len_level_block_i):
                outs[j] = joined_out
        return outs[0]
class FractalUnit(nn.Module):
    """
    FractalNet unit: one fractal block followed by 2x2 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    num_columns : int
        Number of columns in each block.
    loc_drop_prob : float
        Local drop path probability.
    dropout_prob : float
        Probability of dropout.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_columns,
                 loc_drop_prob,
                 dropout_prob):
        super(FractalUnit, self).__init__()
        self.block = FractalBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            num_columns=num_columns,
            loc_drop_prob=loc_drop_prob,
            dropout_prob=dropout_prob)
        self.pool = nn.MaxPool2d(
            kernel_size=2,
            stride=2)

    def forward(self, x, glob_num_columns):
        out = self.block(x, glob_num_columns=glob_num_columns)
        return self.pool(out)
class CIFARFractalNet(nn.Module):
    """
    FractalNet model for CIFAR from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    num_columns : int
        Number of columns in each block.
    dropout_probs : list of float
        Probability of dropout in each block.
    loc_drop_prob : float
        Local drop path probability.
    glob_drop_ratio : float
        Global drop part fraction.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 num_columns,
                 dropout_probs,
                 loc_drop_prob,
                 glob_drop_ratio,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARFractalNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.glob_drop_ratio = glob_drop_ratio
        self.num_columns = num_columns
        # ParametricSequential forwards the extra `glob_num_columns` argument
        # through to every unit.
        self.features = ParametricSequential()
        for i, out_channels in enumerate(channels):
            dropout_prob = dropout_probs[i]
            self.features.add_module("unit{}".format(i + 1), FractalUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                num_columns=num_columns,
                loc_drop_prob=loc_drop_prob,
                dropout_prob=dropout_prob))
            in_channels = out_channels
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # He-uniform init for conv weights; zero for conv biases if present.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        # A `glob_drop_ratio` fraction of the batch uses global drop path
        # (a single randomly chosen column); the rest uses local drop path.
        glob_batch_size = int(x.size(0) * self.glob_drop_ratio)
        glob_num_columns = np.random.randint(0, self.num_columns, size=(glob_batch_size,))
        x = self.features(x, glob_num_columns=glob_num_columns)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_fractalnet_cifar(num_classes,
                         model_name=None,
                         pretrained=False,
                         root=os.path.join("~", ".torch", "models"),
                         **kwargs):
    """
    Create FractalNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    dropout_probs = (0.0, 0.1, 0.2, 0.3, 0.4)
    # Channels double per unit except the last, which repeats the previous width.
    channels = [64 * (2 ** (i if i != len(dropout_probs) - 1 else i - 1)) for i in range(len(dropout_probs))]
    num_columns = 3
    loc_drop_prob = 0.15
    glob_drop_ratio = 0.5
    net = CIFARFractalNet(
        channels=channels,
        num_columns=num_columns,
        dropout_probs=dropout_probs,
        loc_drop_prob=loc_drop_prob,
        glob_drop_ratio=glob_drop_ratio,
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def fractalnet_cifar10(num_classes=10, **kwargs):
    """
    FractalNet model for CIFAR-10 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_fractalnet_cifar(
        num_classes=num_classes,
        model_name="fractalnet_cifar10",
        **kwargs)
def fractalnet_cifar100(num_classes=100, **kwargs):
    """
    FractalNet model for CIFAR-100 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_fractalnet_cifar(
        num_classes=num_classes,
        model_name="fractalnet_cifar100",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build each CIFAR variant, verify its trainable-parameter
    # count, then run one forward/backward pass on a 32x32 input.
    import torch
    pretrained = False
    models = [
        (fractalnet_cifar10, 10),
        (fractalnet_cifar100, 100),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each configuration.
        assert (model != fractalnet_cifar10 or weight_count == 33724618)
        assert (model != fractalnet_cifar100 or weight_count == 33770788)
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
if __name__ == "__main__":
    _test()
| 15,954
| 31.038153
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/mobilenetv3.py
|
"""
MobileNetV3 for ImageNet-1K, implemented in PyTorch.
Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
__all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4',
'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2',
'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\
HSwish
class MobileNetV3Unit(nn.Module):
    """
    MobileNetV3 unit: inverted residual with an optional 1x1 expansion,
    a depthwise convolution, an optional SE block, and a linear projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    exp_channels : int
        Number of middle (expanded) channels.
    stride : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    activation : str
        Activation function or name of activation function.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 exp_channels,
                 stride,
                 use_kernel3,
                 activation,
                 use_se):
        super(MobileNetV3Unit, self).__init__()
        assert (exp_channels >= out_channels)
        # Identity shortcut only when input/output shapes match.
        self.residual = (in_channels == out_channels) and (stride == 1)
        self.use_se = use_se
        # Skip the 1x1 expansion when no channel expansion is needed.
        self.use_exp_conv = exp_channels != out_channels
        mid_channels = exp_channels
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=activation)
        # Depthwise conv: kernel size is part of the searched architecture.
        dw_conv_block = dwconv3x3_block if use_kernel3 else dwconv5x5_block
        self.conv1 = dw_conv_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=4,
                round_mid=True,
                out_activation="hsigmoid")
        # Linear (no activation) projection back down.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        out = self.exp_conv(x) if self.use_exp_conv else x
        out = self.conv1(out)
        if self.use_se:
            out = self.se(out)
        out = self.conv2(out)
        if self.residual:
            out = out + identity
        return out
class MobileNetV3FinalBlock(nn.Module):
    """
    MobileNetV3 final block: 1x1 conv with H-Swish, optionally followed by
    an SE block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_se):
        super(MobileNetV3FinalBlock, self).__init__()
        self.use_se = use_se
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation="hswish")
        if self.use_se:
            self.se = SEBlock(
                channels=out_channels,
                reduction=4,
                round_mid=True,
                out_activation="hsigmoid")

    def forward(self, x):
        out = self.conv(x)
        return self.se(out) if self.use_se else out
class MobileNetV3Classifier(nn.Module):
    """
    MobileNetV3 classifier head: 1x1 conv -> H-Swish -> (optional dropout)
    -> 1x1 conv with bias.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 dropout_rate):
        super(MobileNetV3Classifier, self).__init__()
        # Dropout layer is only instantiated when it would have an effect.
        self.use_dropout = (dropout_rate != 0.0)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.activ = HSwish(inplace=True)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x):
        out = self.activ(self.conv1(x))
        if self.use_dropout:
            out = self.dropout(out)
        return self.conv2(out)
class MobileNetV3(nn.Module):
    """
    MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    exp_channels : list of list of int
        Number of middle (expanded) channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    use_relu : list of list of int/bool
        Using ReLU activation flag for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    final_use_se : bool
        Whether to use SE-module in the final block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MobileNetV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        # Stem: stride-2 3x3 conv with H-Swish activation.
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            activation="hswish"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = exp_channels[i][j]
                # First unit of each stage downsamples; the very first stage
                # only when first_stride is set (the "small" variant).
                stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                # Per-unit 1/0 flags from the config tables.
                use_kernel3 = kernels3[i][j] == 1
                activation = "relu" if use_relu[i][j] == 1 else "hswish"
                use_se_flag = use_se[i][j] == 1
                stage.add_module("unit{}".format(j + 1), MobileNetV3Unit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    exp_channels=exp_channels_ij,
                    use_kernel3=use_kernel3,
                    stride=stride,
                    activation=activation,
                    use_se=use_se_flag))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", MobileNetV3FinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels,
            use_se=final_use_se))
        in_channels = final_block_channels
        # 7x7 pooling matches the default 224x224 input (224 / 32 = 7).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = MobileNetV3Classifier(
            in_channels=in_channels,
            out_channels=num_classes,
            mid_channels=classifier_mid_channels,
            dropout_rate=0.2)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Classifier is convolutional (works on the pooled 1x1 map),
        # so flattening happens after it.
        x = self.output(x)
        x = x.view(x.size(0), -1)
        return x
def get_mobilenetv3(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create MobileNetV3 model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MobileNetV3 ('small' or 'large').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    # Per-stage configuration tables; entries of kernels3/use_relu/use_se are
    # 1/0 flags (1 -> 3x3 kernel / ReLU / SE-block, 0 -> 5x5 / H-Swish / none).
    if version == "small":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
        exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]]
        first_stride = True
        final_block_channels = 576
    elif version == "large":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]]
        kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]]
        first_stride = False
        final_block_channels = 960
    else:
        raise ValueError("Unsupported MobileNetV3 version {}".format(version))
    final_use_se = False
    classifier_mid_channels = 1280
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
        # The final conv is widened only when scaling up, never narrowed.
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale)
    net = MobileNetV3(
        channels=channels,
        exp_channels=exp_channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        use_relu=use_relu,
        use_se=use_se,
        first_stride=first_stride,
        final_use_se=final_use_se,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def mobilenetv3_small_w7d20(**kwargs):
    """
    MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs)
def mobilenetv3_small_wd2(**kwargs):
    """
    MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs)
def mobilenetv3_small_w3d4(**kwargs):
    """
    MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs)
def mobilenetv3_small_w1(**kwargs):
    """
    MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs)
def mobilenetv3_small_w5d4(**kwargs):
    """
    MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs)
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    # Bug fix: this function previously passed model_name="mobilenetv3_small_w7d20"
    # (a copy-paste of the small variant), which would download the wrong
    # pretrained weights for this large variant. The docstring said "Small" too.
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def mobilenetv3_large_wd2(**kwargs):
    """
    MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs)
def mobilenetv3_large_w3d4(**kwargs):
    """
    MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs)
def mobilenetv3_large_w1(**kwargs):
    """
    MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs)
def mobilenetv3_large_w5d4(**kwargs):
    """
    MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        The requested network.
    """
    return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every MobileNetV3 variant: parameter count and output shape."""
    import torch

    pretrained = False
    # Model factory -> expected trainable-parameter count.
    expected_widths = {
        mobilenetv3_small_w7d20: 2159600,
        mobilenetv3_small_wd2: 2288976,
        mobilenetv3_small_w3d4: 2581312,
        mobilenetv3_small_w1: 2945288,
        mobilenetv3_small_w5d4: 3643632,
        mobilenetv3_large_w7d20: 2943080,
        mobilenetv3_large_wd2: 3334896,
        mobilenetv3_large_w3d4: 4263496,
        mobilenetv3_large_w1: 5481752,
        mobilenetv3_large_w5d4: 7459144,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_width)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 18,999
| 33.234234
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/diaresnet.py
|
"""
DIA-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['DIAResNet', 'diaresnet10', 'diaresnet12', 'diaresnet14', 'diaresnetbc14b', 'diaresnet16', 'diaresnet18',
'diaresnet26', 'diaresnetbc26b', 'diaresnet34', 'diaresnetbc38b', 'diaresnet50', 'diaresnet50b',
'diaresnet101', 'diaresnet101b', 'diaresnet152', 'diaresnet152b', 'diaresnet200', 'diaresnet200b',
'DIAAttention', 'DIAResUnit']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, DualPathSequential
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class FirstLSTMAmp(nn.Module):
    """
    First LSTM amplifier branch: a two-layer MLP with a bottleneck of
    ``in_features // 4``, used in place of a plain linear projection for the
    first DIA-LSTM layer.

    Parameters:
    ----------
    in_features : int
        Number of input channels.
    out_features : int
        Number of output channels.
    """
    def __init__(self,
                 in_features,
                 out_features):
        super(FirstLSTMAmp, self).__init__()
        bottleneck = in_features // 4
        self.fc1 = nn.Linear(
            in_features=in_features,
            out_features=bottleneck)
        self.activ = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(
            in_features=bottleneck,
            out_features=out_features)

    def forward(self, x):
        return self.fc2(self.activ(self.fc1(x)))
class DIALSTMCell(nn.Module):
    """
    DIA-LSTM cell: a stack of LSTM layers whose input/hidden projections
    ("amplifiers") are plain linear maps, except for the first layer, which
    uses the bottleneck MLP `FirstLSTMAmp`.
    Parameters:
    ----------
    in_x_features : int
        Number of x input channels.
    in_h_features : int
        Number of h input channels.
    num_layers : int
        Number of amplifiers.
    dropout_rate : float, default 0.1
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_x_features,
                 in_h_features,
                 num_layers,
                 dropout_rate=0.1):
        super(DIALSTMCell, self).__init__()
        self.num_layers = num_layers
        # Each amplifier emits all four gate pre-activations at once.
        out_features = 4 * in_h_features
        self.x_amps = nn.Sequential()
        self.h_amps = nn.Sequential()
        for i in range(num_layers):
            # Only the very first layer uses the bottleneck MLP amplifier.
            amp_class = FirstLSTMAmp if i == 0 else nn.Linear
            self.x_amps.add_module("amp{}".format(i + 1), amp_class(
                in_features=in_x_features,
                out_features=out_features))
            self.h_amps.add_module("amp{}".format(i + 1), amp_class(
                in_features=in_h_features,
                out_features=out_features))
            # From the second layer on, x is the previous layer's hidden state.
            in_x_features = in_h_features
        self.dropout = nn.Dropout(p=dropout_rate)
    def forward(self, x, h, c):
        """
        Run one recurrent step through all layers; returns the updated
        per-layer hidden-state and cell-state lists.
        """
        hy = []
        cy = []
        for i in range(self.num_layers):
            hx_i = h[i]
            cx_i = c[i]
            # Gate pre-activations, chunked as input/forget/cell/output.
            gates = self.x_amps[i](x) + self.h_amps[i](hx_i)
            i_gate, f_gate, c_gate, o_gate = gates.chunk(chunks=4, dim=1)
            i_gate = torch.sigmoid(i_gate)
            f_gate = torch.sigmoid(f_gate)
            c_gate = torch.tanh(c_gate)
            o_gate = torch.sigmoid(o_gate)
            cy_i = (f_gate * cx_i) + (i_gate * c_gate)
            # NOTE(review): a classical LSTM applies tanh to the cell state
            # here; sigmoid appears to be the deliberate DIANet modification
            # (keeps attention weights in (0, 1)) -- confirm against the paper.
            hy_i = o_gate * torch.sigmoid(cy_i)
            cy.append(cy_i)
            hy.append(hy_i)
            # Dropped-out hidden output feeds the next layer as its x input.
            x = self.dropout(hy_i)
        return hy, cy
class DIAAttention(nn.Module):
    """
    DIA-Net attention module: squeezes the feature map to a per-channel
    descriptor, feeds it through a stateful DIA-LSTM cell (shared across the
    units of a stage), and rescales the input channels by the resulting
    hidden state.

    Parameters:
    ----------
    in_x_features : int
        Number of x input channels.
    in_h_features : int
        Number of h input channels.
    num_layers : int, default 1
        Number of amplifiers.
    """
    def __init__(self,
                 in_x_features,
                 in_h_features,
                 num_layers=1):
        super(DIAAttention, self).__init__()
        self.num_layers = num_layers
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.lstm = DIALSTMCell(
            in_x_features=in_x_features,
            in_h_features=in_h_features,
            num_layers=num_layers)

    def forward(self, x, hc=None):
        # Global average pooling -> (batch, channels) descriptor.
        descriptor = self.pool(x).view(x.size(0), -1)
        if hc is None:
            # First unit of a stage: start from all-zero recurrent state.
            zero = torch.zeros_like(descriptor)
            h = [zero] * self.num_layers
            c = [zero] * self.num_layers
        else:
            h, c = hc
        h, c = self.lstm(descriptor, h, c)
        # Last layer's hidden state becomes the per-channel scale.
        scale = h[-1].unsqueeze(-1).unsqueeze(-1)
        return x * scale, (h, c)
class DIAResUnit(nn.Module):
    """
    DIA-ResNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    attention : nn.Module, default None
        Attention module, shared across the units of a stage.
        NOTE(review): forward calls it unconditionally, so the default None
        would raise; all in-file callers pass a DIAAttention instance.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 bottleneck=True,
                 conv1_stride=False,
                 attention=None):
        super(DIAResUnit, self).__init__()
        # A projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)
        self.attention = attention
    def forward(self, x, hc=None):
        """
        Apply the residual body, the DIA attention (threading the recurrent
        state hc through), the shortcut addition, and the final ReLU.
        """
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Attention rescales the body output and updates the shared LSTM state.
        x, hc = self.attention(x, hc)
        x = x + identity
        x = self.activ(x)
        return x, hc
class DIAResNet(nn.Module):
    """
    DIA-ResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(DIAResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # DualPathSequential threads the (x, hc) pair through the units;
            # return_two=False presumably makes the stage emit only x --
            # confirm against common.DualPathSequential.
            stage = DualPathSequential(return_two=False)
            # One attention module is created per stage and shared by all its
            # units, so the LSTM state flows along the stage.
            attention = DIAAttention(
                in_x_features=channels_per_stage[0],
                in_h_features=channels_per_stage[0])
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of every stage except the first one downsamples.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), DIAResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    attention=attention))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 7x7 pooling matches the default 224x224 input (224 / 32 = 7).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; biases zeroed.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_diaresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create DIA-ResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    # By default, depths of 50+ use bottleneck units (standard ResNet rule).
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Units per stage for each supported depth.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14 and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported DIA-ResNet with number of blocks: {}".format(blocks))
    # Sanity check: units * convs-per-unit + stem + classifier == depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # The last unit of the last stage keeps its width so the classifier
        # input size stays fixed.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = DIAResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def diaresnet10(**kwargs):
    """
    DIA-ResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=10, model_name="diaresnet10", **kwargs)
def diaresnet12(**kwargs):
    """
    DIA-ResNet-12 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=12, model_name="diaresnet12", **kwargs)
def diaresnet14(**kwargs):
    """
    DIA-ResNet-14 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=14, model_name="diaresnet14", **kwargs)
def diaresnetbc14b(**kwargs):
    """
    DIA-ResNet-BC-14b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diaresnetbc14b", **kwargs)
def diaresnet16(**kwargs):
    """
    DIA-ResNet-16 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=16, model_name="diaresnet16", **kwargs)
def diaresnet18(**kwargs):
    """
    DIA-ResNet-18 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=18, model_name="diaresnet18", **kwargs)
def diaresnet26(**kwargs):
    """
    DIA-ResNet-26 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=26, bottleneck=False, model_name="diaresnet26", **kwargs)
def diaresnetbc26b(**kwargs):
    """
    DIA-ResNet-BC-26b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diaresnetbc26b", **kwargs)
def diaresnet34(**kwargs):
    """
    DIA-ResNet-34 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=34, model_name="diaresnet34", **kwargs)
def diaresnetbc38b(**kwargs):
    """
    DIA-ResNet-BC-38b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diaresnetbc38b", **kwargs)
def diaresnet50(**kwargs):
    """
    DIA-ResNet-50 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=50, model_name="diaresnet50", **kwargs)
def diaresnet50b(**kwargs):
    """
    DIA-ResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=50, conv1_stride=False, model_name="diaresnet50b", **kwargs)
def diaresnet101(**kwargs):
    """
    DIA-ResNet-101 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=101, model_name="diaresnet101", **kwargs)
def diaresnet101b(**kwargs):
    """
    DIA-ResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=101, conv1_stride=False, model_name="diaresnet101b", **kwargs)
def diaresnet152(**kwargs):
    """
    DIA-ResNet-152 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=152, model_name="diaresnet152", **kwargs)
def diaresnet152b(**kwargs):
    """
    DIA-ResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=152, conv1_stride=False, model_name="diaresnet152b", **kwargs)
def diaresnet200(**kwargs):
    """
    DIA-ResNet-200 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=200, model_name="diaresnet200", **kwargs)
def diaresnet200b(**kwargs):
    """
    DIA-ResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    DIAResNet
        The requested network.
    """
    return get_diaresnet(blocks=200, conv1_stride=False, model_name="diaresnet200b", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every DIA-ResNet variant: parameter count and output shape."""
    import torch

    pretrained = False
    # Model factory -> expected trainable-parameter count.
    expected_widths = {
        diaresnet10: 6297352,
        diaresnet12: 6371336,
        diaresnet14: 6666760,
        diaresnetbc14b: 24023976,
        diaresnet16: 7847432,
        diaresnet18: 12568072,
        diaresnet26: 18838792,
        diaresnetbc26b: 29954216,
        diaresnet34: 22676232,
        diaresnetbc38b: 35884456,
        diaresnet50: 39516072,
        diaresnet50b: 39516072,
        diaresnet101: 58508200,
        diaresnet101b: 58508200,
        diaresnet152: 74151848,
        diaresnet152b: 74151848,
        diaresnet200: 78632872,
        diaresnet200b: 78632872,
    }

    for model, expected_width in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_width)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 24,132
| 32.058904
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/lffd.py
|
"""
LFFD for face detection, implemented in PyTorch.
Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
"""
__all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface']
import os
import torch.nn as nn
from .common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent
from .resnet import ResUnit
from .preresnet import PreResUnit
class LffdDetectionBranch(nn.Module):
    """
    LFFD specific detection branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias,
                 use_bn):
        super(LffdDetectionBranch, self).__init__()
        # 1x1 conv keeping the channel count (default activation applied inside the block).
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=in_channels,
            bias=bias,
            use_bn=use_bn)
        # 1x1 projection to the prediction channels; activation=None so raw
        # regression offsets / class scores are emitted.
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            activation=None)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class LffdDetectionBlock(nn.Module):
    """
    LFFD specific detection block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 bias,
                 use_bn):
        super(LffdDetectionBlock, self).__init__()
        # Shared 1x1 reduction feeding both prediction branches.
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=bias,
            use_bn=use_bn)
        # Two parallel heads; Concurrent concatenates their outputs.
        self.branches = Concurrent()
        # 4 channels: bounding-box regression values.
        self.branches.add_module("bbox_branch", LffdDetectionBranch(
            in_channels=mid_channels,
            out_channels=4,
            bias=bias,
            use_bn=use_bn))
        # 2 channels: face / non-face classification scores.
        self.branches.add_module("score_branch", LffdDetectionBranch(
            in_channels=mid_channels,
            out_channels=2,
            bias=bias,
            use_bn=use_bn))
    def forward(self, x):
        x = self.conv(x)
        x = self.branches(x)
        return x
class LFFD(nn.Module):
    """
    LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
    Parameters:
    ----------
    enc_channels : list of int
        Number of output channels for each encoder stage.
    dec_channels : int
        Number of output channels for each decoder stage.
    init_block_channels : int
        Number of output channels for the initial encoder unit.
    layers : list of int
        Number of units in each encoder stage.
    int_bends : list of int
        Number of internal bends for each encoder stage.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 640)
        Spatial size of the expected input image.
    """
    def __init__(self,
                 enc_channels,
                 dec_channels,
                 init_block_channels,
                 layers,
                 int_bends,
                 use_preresnet,
                 in_channels=3,
                 in_size=(640, 640)):
        super(LFFD, self).__init__()
        self.in_size = in_size
        unit_class = PreResUnit if use_preresnet else ResUnit
        # LFFD backbone uses biased convolutions and no batch normalization.
        bias = True
        use_bn = False
        # Encoder collects the tapped intermediate feature maps (return_last=False:
        # the final stage output itself is not returned, only the flagged taps).
        self.encoder = MultiOutputSequential(return_last=False)
        # Stride-2, padding=0 ("valid") convolution performs the first downsampling.
        self.encoder.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            stride=2,
            padding=0,
            bias=bias,
            use_bn=use_bn))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            stage = MultiOutputSequential(multi_output=False, dual_output=True)
            # Per-stage downsampling transition (again valid stride-2 conv).
            stage.add_module("trans{}".format(i + 1), conv3x3(
                in_channels=in_channels,
                out_channels=channels_per_stage,
                stride=2,
                padding=0,
                bias=bias))
            for j in range(layers_per_stage):
                unit = unit_class(
                    in_channels=channels_per_stage,
                    out_channels=channels_per_stage,
                    stride=1,
                    bias=bias,
                    use_bn=use_bn,
                    bottleneck=False)
                # The last `int_bends_per_stage` units of the stage are tapped as
                # extra detection outputs ("internal bends").
                if layers_per_stage - j <= int_bends_per_stage:
                    unit.do_output = True
                stage.add_module("unit{}".format(j + 1), unit)
            # The stage's final activation is always a detection tap.
            final_activ = nn.ReLU(inplace=True)
            final_activ.do_output = True
            stage.add_module("final_activ", final_activ)
            stage.do_output2 = True
            in_channels = channels_per_stage
            self.encoder.add_module("stage{}".format(i + 1), stage)
        # Decoder: one detection head per tapped encoder feature map, applied in
        # parallel; heads are registered in the same order the taps are produced.
        self.decoder = ParallelConcurent()
        k = 0
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            for j in range(layers_per_stage):
                if layers_per_stage - j <= int_bends_per_stage:
                    self.decoder.add_module("unit{}".format(k + 1), LffdDetectionBlock(
                        in_channels=channels_per_stage,
                        mid_channels=dec_channels,
                        bias=bias,
                        use_bn=use_bn))
                    k += 1
            # Head for the stage's final-activation tap.
            self.decoder.add_module("unit{}".format(k + 1), LffdDetectionBlock(
                in_channels=channels_per_stage,
                mid_channels=dec_channels,
                bias=bias,
                use_bn=use_bn))
            k += 1
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        # Encoder returns a list of tapped feature maps; decoder maps each through
        # its own detection block, yielding one prediction tensor per scale.
        x = self.encoder(x)
        x = self.decoder(x)
        return x
def get_lffd(blocks,
             use_preresnet,
             model_name=None,
             pretrained=False,
             root=os.path.join("~", ".torch", "models"),
             **kwargs):
    """
    Create LFFD model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Depth -> (per-stage unit counts, per-stage channels, per-stage internal bends).
    configs = {
        20: ([3, 1, 1, 1, 1], [64, 64, 64, 128, 128], [0, 0, 0, 0, 0]),
        25: ([4, 2, 1, 3], [64, 64, 128, 128], [1, 1, 0, 2]),
    }
    if blocks not in configs:
        raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks))
    layers, enc_channels, int_bends = configs[blocks]
    net = LFFD(
        enc_channels=enc_channels,
        dec_channels=128,
        init_block_channels=64,
        layers=layers,
        int_bends=int_bends,
        use_preresnet=use_preresnet,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def lffd20x5s320v2_widerface(**kwargs):
    """Build the LFFD-320-20L-5S-V2 face detector for WIDER FACE ('LFFD: A Light and Fast
    Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models')
    keyword arguments.
    """
    net = get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs)
    return net
def lffd25x8s560v1_widerface(**kwargs):
    """Build the LFFD-560-25L-8S-V1 face detector for WIDER FACE ('LFFD: A Light and Fast
    Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models')
    keyword arguments.
    """
    net = get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test both LFFD variants: parameter count, forward pass, output count."""
    import torch
    in_size = (640, 640)
    pretrained = False
    # (model factory, expected number of output scales, expected parameter count).
    configs = [
        (lffd20x5s320v2_widerface, 5, 1520606),
        (lffd25x8s560v1_widerface, 8, 2290608),
    ]
    for model, num_outs, expected_count in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print(f"m={model.__name__}, {weight_count}")
        assert weight_count == expected_count
        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert len(y) == num_outs
if __name__ == "__main__":
    _test()
| 10,582
| 30.685629
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/sepreresnet.py
|
"""
SE-PreResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b', 'SEPreResUnit']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, SEBlock
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(nn.Module):
    """
    SE-PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 conv1_stride):
        super(SEPreResUnit, self).__init__()
        # Projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                conv1_stride=conv1_stride)
        else:
            self.body = PreResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        # Squeeze-and-Excitation recalibration applied to the residual branch.
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
    def forward(self, x):
        identity = x
        # Pre-activation body also returns the pre-activated input (x_pre_activ).
        x, x_pre_activ = self.body(x)
        x = self.se(x)
        if self.resize_identity:
            # Shortcut projection consumes the pre-activation tensor, as in PreResNet.
            identity = self.identity_conv(x_pre_activ)
        x = x + identity
        return x
class SEPreResNet(nn.Module):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SEPreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 1 if (i == 0) or (j != 0) else 2
                stage.add_module("unit{}".format(j + 1), SEPreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Pre-activation networks need a final BN+ReLU after the last unit.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # 7x7 average pool assumes a 224x224 input (7x7 final feature map).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled features before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create SE-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # By convention, depths of 50+ use bottleneck units.
        bottleneck = (blocks >= 50)
    # Depths whose per-stage unit counts do not depend on the block type.
    shared_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths whose unit counts depend on whether bottleneck blocks are used.
    dual_layers = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    if blocks in shared_layers:
        layers = shared_layers[blocks]
    elif (blocks, bool(bottleneck)) in dual_layers:
        layers = dual_layers[(blocks, bool(bottleneck))]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: convs per unit (3 for bottleneck, 2 for simple) plus stem/head.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expand the stage widths by a factor of 4.
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def sepreresnet10(**kwargs):
    """SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs)
    return net
def sepreresnet12(**kwargs):
    """SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs)
    return net
def sepreresnet14(**kwargs):
    """SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs)
    return net
def sepreresnet16(**kwargs):
    """SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs)
    return net
def sepreresnet18(**kwargs):
    """SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
    return net
def sepreresnet26(**kwargs):
    """SE-PreResNet-26 (simple blocks) from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs)
    return net
def sepreresnetbc26b(**kwargs):
    """SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs)
    return net
def sepreresnet34(**kwargs):
    """SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
    return net
def sepreresnetbc38b(**kwargs):
    """SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs)
    return net
def sepreresnet50(**kwargs):
    """SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
    return net
def sepreresnet50b(**kwargs):
    """SE-PreResNet-50 variant with the stride moved to the second bottleneck convolution
    ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
    return net
def sepreresnet101(**kwargs):
    """SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
    return net
def sepreresnet101b(**kwargs):
    """SE-PreResNet-101 variant with the stride moved to the second bottleneck convolution
    ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
    return net
def sepreresnet152(**kwargs):
    """SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
    return net
def sepreresnet152b(**kwargs):
    """SE-PreResNet-152 variant with the stride moved to the second bottleneck convolution
    ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
    return net
def sepreresnet200(**kwargs):
    """SE-PreResNet-200 (experimental) from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
    return net
def sepreresnet200b(**kwargs):
    """SE-PreResNet-200 (experimental) variant with the stride moved to the second bottleneck
    convolution ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).

    Accepts `pretrained` (bool, default False) and `root` (str, default '~/.torch/models') keyword arguments.
    """
    net = get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every SE-PreResNet variant: parameter count, forward pass, backward pass."""
    import torch
    pretrained = False
    # Model factory -> expected trainable-parameter count (dict keeps insertion order, 3.7+).
    expected = {
        sepreresnet10: 5461668,
        sepreresnet12: 5536232,
        sepreresnet14: 5833840,
        sepreresnet16: 7022976,
        sepreresnet18: 11776928,
        sepreresnet26: 18092188,
        sepreresnetbc26b: 17388424,
        sepreresnet34: 21957204,
        sepreresnetbc38b: 24019064,
        sepreresnet50: 28080472,
        sepreresnet50b: 28080472,
        sepreresnet101: 49319320,
        sepreresnet101b: 49319320,
        sepreresnet152: 66814296,
        sepreresnet152b: 66814296,
        sepreresnet200: 71828312,
        sepreresnet200b: 71828312,
    }
    for model, count in expected.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print(f"m={model.__name__}, {weight_count}")
        assert weight_count == count
        # One forward/backward round trip on a single ImageNet-sized image.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
    _test()
| 18,420
| 32.371377
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resnext.py
|
"""
ResNeXt for ImageNet-1K, implemented in PyTorch.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'ResNeXtBottleneck', 'ResNeXtUnit']
import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
class ResNeXtBottleneck(nn.Module):
    """
    ResNeXt bottleneck block for residual path in ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 bottleneck_factor=4):
        super(ResNeXtBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        # Per-group width D scales with bottleneck_width relative to the base width of 64.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        # Total width of the grouped 3x3 convolution.
        group_width = cardinality * D
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width)
        # Grouped 3x3 conv carries the stride (downsampling happens here).
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            stride=stride,
            groups=cardinality)
        # Expansion back to out_channels; no activation (applied after the residual add).
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class ResNeXtUnit(nn.Module):
    """
    ResNeXt unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width):
        super(ResNeXtUnit, self).__init__()
        # Projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
        if self.resize_identity:
            # 1x1 projection (no activation) to match the residual branch's shape.
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        # Post-addition activation, as in the original ResNet layout.
        x = self.activ(x)
        return x
class ResNeXt(nn.Module):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNeXt, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), ResNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 7x7 average pool assumes a 224x224 input (7x7 final feature map).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled features before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_resnext(blocks,
                cardinality,
                bottleneck_width,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Depth -> per-stage unit counts (each bottleneck unit has 3 convs; +2 for stem/head).
    depth_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    assert (sum(layers) * 3 + 2 == blocks)
    channels = [[ci] * li for (ci, li) in zip([256, 512, 1024, 2048], layers)]
    net = ResNeXt(
        channels=channels,
        init_block_channels=64,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def resnext14_16x4d(**kwargs):
"""
ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs)
def resnext14_32x2d(**kwargs):
"""
ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs)
def resnext14_32x4d(**kwargs):
"""
ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs)
def resnext26_16x4d(**kwargs):
"""
ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs)
def resnext26_32x2d(**kwargs):
"""
ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs)
def resnext26_32x4d(**kwargs):
"""
ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs)
def resnext38_32x4d(**kwargs):
    """
    ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=38,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext38_32x4d",
        **kwargs)
def resnext50_32x4d(**kwargs):
    """
    ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext50_32x4d",
        **kwargs)
def resnext101_32x4d(**kwargs):
    """
    ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext101_32x4d",
        **kwargs)
def resnext101_64x4d(**kwargs):
    """
    ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="resnext101_64x4d",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build every ResNeXt variant, verify parameter count and output shape."""
    import torch
    pretrained = False
    # Expected trainable-parameter counts per model constructor.
    expected_widths = {
        resnext14_16x4d: 7127336,
        resnext14_32x2d: 7029416,
        resnext14_32x4d: 9411880,
        resnext26_16x4d: 10119976,
        resnext26_32x2d: 9924136,
        resnext26_32x4d: 15389480,
        resnext38_32x4d: 21367080,
        resnext50_32x4d: 25028904,
        resnext101_32x4d: 44177704,
        resnext101_64x4d: 83455272,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # One forward/backward pass on an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke test when the module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,857
| 31.090713
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/jasper.py
|
"""
Jasper/DR for ASR, implemented in PyTorch.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader',
'NemoMelSpecExtractor', 'CtcDecoder']
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import DualPathSequential, DualPathParallelConcurent
def outmask_fill(x, x_len, value=0.0):
    """
    Fill the out-of-length (padded) tail of every sequence in a batch with a constant.

    Parameters:
    ----------
    x : tensor
        Input tensor of shape (batch, channels, seq_len).
    x_len : tensor
        Valid lengths per batch item, shape (batch,).
    value : float, default 0.0
        Fill value for positions at or beyond each length.

    Returns:
    -------
    tensor
        Tensor with the padded positions filled with `value`.
    """
    seq_len = x.size(2)
    # positions[b, t] = t, compared against each sample's length.
    positions = torch.arange(seq_len).to(x_len.device).expand(x_len.size(0), seq_len)
    pad_mask = (positions >= x_len.unsqueeze(1)).unsqueeze(dim=1).to(device=x.device)
    return x.masked_fill(mask=pad_mask, value=value)
def masked_normalize(x, x_len):
    """
    Normalize a tensor along the sequence axis using only the valid (unmasked)
    positions: per-channel zero mean and unit (population) std.

    Parameters:
    ----------
    x : tensor
        Input tensor of shape (batch, channels, seq_len).
    x_len : tensor
        Valid lengths per batch item, shape (batch,).

    Returns:
    -------
    tensor
        Normalized tensor.
    """
    x = outmask_fill(x, x_len)
    x_mean = x.sum(dim=2) / x_len.unsqueeze(dim=1)
    x_m0 = x - x_mean.unsqueeze(dim=2)
    x_m0 = outmask_fill(x_m0, x_len)
    # Bug fix: the std is the root of the mean of SQUARED deviations. The previous
    # code divided the plain sum of the centered values by the length, which is ~0
    # by construction and blew up the division (cf. the E[x^2] - E[x]^2 form used
    # in masked_normalize2).
    x_std = (x_m0.square().sum(dim=2) / x_len.unsqueeze(dim=1)).sqrt()
    x = x_m0 / x_std.unsqueeze(dim=2)
    return x
def masked_normalize2(x, x_len):
    """
    Mask-aware normalization over the sequence axis (scheme #2): the population
    std is computed as sqrt(E[x^2] - E[x]^2) over the valid positions only.

    Parameters:
    ----------
    x : tensor
        Input tensor of shape (batch, channels, seq_len).
    x_len : tensor
        Valid lengths per batch item, shape (batch,).

    Returns:
    -------
    tensor
        Normalized tensor.
    """
    x = outmask_fill(x, x_len)
    len_col = x_len.unsqueeze(dim=1)
    mean = x.sum(dim=2) / len_col
    sq_mean = x.square().sum(dim=2) / len_col
    std = (sq_mean - mean.square()).sqrt()
    return (x - mean.unsqueeze(dim=2)) / std.unsqueeze(dim=2)
def masked_normalize3(x, x_len):
    """
    Mask-aware normalization over the sequence axis (scheme #3): per-sample mean
    and (unbiased) std computed over each valid prefix, with a small epsilon added
    to the std for numerical safety.

    Parameters:
    ----------
    x : tensor
        Input tensor of shape (batch, channels, seq_len).
    x_len : tensor
        Valid lengths per batch item, shape (batch,).

    Returns:
    -------
    tensor
        Normalized tensor.
    """
    eps = 1e-5
    mean = torch.zeros(x.shape[:2], dtype=x.dtype, device=x.device)
    std = torch.zeros(x.shape[:2], dtype=x.dtype, device=x.device)
    for k, length in enumerate(x_len):
        valid = x[k, :, :length]
        mean[k, :] = valid.mean(dim=1)
        std[k, :] = valid.std(dim=1)
    std = std + eps
    return (x - mean.unsqueeze(dim=2)) / std.unsqueeze(dim=2)
class NemoAudioReader(object):
    """
    Audio reader from the NVIDIA NEMO toolkit.

    Parameters:
    ----------
    desired_audio_sample_rate : int, default 16000
        Desired audio sample rate.
    """
    def __init__(self, desired_audio_sample_rate=16000):
        super(NemoAudioReader, self).__init__()
        self.desired_audio_sample_rate = desired_audio_sample_rate

    def read_from_file(self, audio_file_path):
        """
        Read audio from a file, resampling to the desired rate and mixing
        multichannel data down to mono.

        Parameters:
        ----------
        audio_file_path : str
            Path to audio file.

        Returns:
        -------
        np.array
            Audio data (1D waveform).
        """
        from soundfile import SoundFile
        with SoundFile(audio_file_path, "r") as data:
            sample_rate = data.samplerate
            audio_data = data.read(dtype="float32")
        # soundfile returns (frames, channels) for multichannel files; transpose to
        # (channels, frames) so resampling operates along the last (time) axis.
        audio_data = audio_data.transpose()
        if sample_rate != self.desired_audio_sample_rate:
            from librosa.core import resample as lr_resample
            audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate)
        if audio_data.ndim >= 2:
            # Bug fix: after the transpose above the array is (channels, frames), so
            # mixing down to mono must average over axis 0 (channels); the previous
            # axis=1 collapsed the time axis instead.
            audio_data = np.mean(audio_data, axis=0)
        return audio_data

    def read_from_files(self, audio_file_paths):
        """
        Read audios from files.

        Parameters:
        ----------
        audio_file_paths : list of str
            Paths to audio files.

        Returns:
        -------
        list of np.array
            Audio data for each file.
        """
        assert (type(audio_file_paths) in (list, tuple))
        return [self.read_from_file(audio_file_path) for audio_file_path in audio_file_paths]
class NemoMelSpecExtractor(nn.Module):
    """
    Mel-Spectrogram Extractor from NVIDIA NEMO toolkit.

    Parameters:
    ----------
    sample_rate : int, default 16000
        Sample rate of the input audio data.
    window_size_sec : float, default 0.02
        Size of window for FFT in seconds.
    window_stride_sec : float, default 0.01
        Stride of window for FFT in seconds.
    n_fft : int, default 512
        Length of FT window.
    n_filters : int, default 64
        Number of Mel spectrogram freq bins.
    preemph : float, default 0.97
        Amount of pre emphasis to add to audio.
    dither : float, default 1.0e-05
        Amount of white-noise dithering.
    """
    def __init__(self,
                 sample_rate=16000,
                 window_size_sec=0.02,
                 window_stride_sec=0.01,
                 n_fft=512,
                 n_filters=64,
                 preemph=0.97,
                 dither=1.0e-5):
        super(NemoMelSpecExtractor, self).__init__()
        # Floor added before the log to avoid log(0).
        self.log_zero_guard_value = 2 ** -24
        win_length = int(window_size_sec * sample_rate)
        self.hop_length = int(window_stride_sec * sample_rate)
        self.n_filters = n_filters
        window_tensor = torch.hann_window(win_length, periodic=False)
        self.register_buffer("window", window_tensor)
        self.stft = lambda x: torch.stft(
            x,
            n_fft=n_fft,
            hop_length=self.hop_length,
            win_length=win_length,
            window=self.window.to(dtype=torch.float),
            center=True)
        self.dither = dither
        self.preemph = preemph
        # The output time axis is zero-padded up to a multiple of this value.
        self.pad_align = 16
        from librosa.filters import mel as librosa_mel
        filter_bank = librosa_mel(
            sr=sample_rate,
            n_fft=n_fft,
            n_mels=n_filters,
            fmin=0.0,
            fmax=(sample_rate / 2.0))
        fb_tensor = torch.from_numpy(filter_bank).unsqueeze(0)
        self.register_buffer("fb", fb_tensor)

    def forward(self, x, x_len):
        """
        Compute normalized log-Mel features from batched waveforms.

        Parameters:
        ----------
        x : tensor
            Batched audio waveforms, shape (batch, samples).
        x_len : tensor
            Valid sample counts per batch item, shape (batch,).

        Returns:
        -------
        x : tensor
            Log-Mel features, shape (batch, n_filters, frames).
        x_len : tensor
            Valid frame counts per batch item.
        """
        # Number of STFT frames produced for each waveform.
        x_len = torch.ceil(x_len.float() / self.hop_length).long()
        if self.dither > 0:
            # Add a small amount of white noise.
            x += self.dither * torch.randn_like(x)
        # Pre-emphasis filter: y[t] = x[t] - preemph * x[t - 1].
        x = torch.cat((x[:, :1], x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
        # Run the STFT and power computation in full precision even under autocast.
        with torch.cuda.amp.autocast(enabled=False):
            x = self.stft(x)
            x = x.pow(2).sum(-1)
        # Project the power spectrum onto the Mel filter bank, then take the log.
        x = torch.matmul(self.fb.to(x.dtype), x)
        x = torch.log(x + self.log_zero_guard_value)
        # Per-channel normalization over the valid frames, then zero the padding.
        x = masked_normalize2(x, x_len)
        x = outmask_fill(x, x_len)
        x_len_max = x.size(-1)
        pad_rem = x_len_max % self.pad_align
        if pad_rem != 0:
            x = F.pad(x, pad=(0, self.pad_align - pad_rem))
        return x, x_len

    def calc_flops(self, x):
        # Rough complexity estimate: one op per output element, zero MACs counted.
        assert (x.shape[0] == 1)
        num_flops = x.numel()
        num_macs = 0
        return num_flops, num_macs
class CtcDecoder(object):
    """
    Greedy CTC decoder: collapses repeated labels and drops blanks to form words.

    Parameters:
    ----------
    vocabulary : list of str
        Vocabulary of the dataset. The blank label is `len(vocabulary)`.
    """
    def __init__(self,
                 vocabulary):
        super().__init__()
        self.blank_id = len(vocabulary)
        self.labels_map = {idx: sym for idx, sym in enumerate(vocabulary)}

    def __call__(self,
                 predictions):
        """
        Decode sequences of label indices into words.

        Parameters:
        ----------
        predictions : np.array of int or list of list of int
            Tensor with predicted labels.

        Returns:
        -------
        list of str
            Decoded words, one per sequence.
        """
        results = []
        for label_seq in predictions:
            kept = []
            prev = self.blank_id
            for label in label_seq:
                # Keep a label when it is not blank and not a repeat of the previous
                # (non-blank) emission.
                if label != self.blank_id and (label != prev or prev == self.blank_id):
                    kept.append(label)
                prev = label
            results.append("".join(self.labels_map[c] for c in kept))
        return results
def conv1d1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    Pointwise (kernel size 1) 1D convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, groups=groups, bias=bias)
class MaskConv1d(nn.Conv1d):
    """
    Length-aware 1D convolution: optionally zeroes padded positions before
    convolving, and recomputes the per-sample sequence lengths for the output.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 1 int
        Convolution window size.
    stride : int or tuple/list of 1 int
        Strides of the convolution.
    padding : int or tuple/list of 1 int, default 0
        Padding value for convolution layer.
    dilation : int or tuple/list of 1 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_mask : bool, default True
        Whether to apply the length mask before convolving.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_mask=True):
        super(MaskConv1d, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.use_mask = use_mask

    def forward(self, x, x_len):
        if self.use_mask:
            # Zero the padded tails so they cannot leak into the convolution output.
            x = outmask_fill(x, x_len)
            # Standard 1D convolution output-length formula.
            eff_kernel = self.dilation[0] * (self.kernel_size[0] - 1) + 1
            x_len = (x_len + 2 * self.padding[0] - eff_kernel) // self.stride[0] + 1
        out = F.conv1d(
            input=x,
            weight=self.weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)
        return out, x_len
def mask_conv1d1(in_channels,
                 out_channels,
                 stride=1,
                 groups=1,
                 bias=False):
    """
    Pointwise (kernel size 1) variant of the masked 1D convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return MaskConv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,
                      groups=groups, bias=bias)
class MaskConvBlock1d(nn.Module):
    """
    Masked 1D convolution followed by optional batch normalization, activation
    and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    stride : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True)),
                 dropout_rate=0.0):
        super(MaskConvBlock1d, self).__init__()
        self.use_bn = use_bn
        self.activate = activation is not None
        self.use_dropout = dropout_rate != 0.0
        self.conv = MaskConv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm1d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = activation()
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x, x_len):
        x, x_len = self.conv(x, x_len)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x, x_len
def mask_conv1d1_block(in_channels,
                       out_channels,
                       stride=1,
                       padding=0,
                       **kwargs):
    """
    Pointwise (kernel size 1) variant of the masked 1D convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int, default 1
        Strides of the convolution.
    padding : int, default 0
        Padding value for convolution layer.
    """
    return MaskConvBlock1d(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                           stride=stride, padding=padding, **kwargs)
class ChannelShuffle1d(nn.Module):
    """
    Channel shuffle layer for 1D data: interleaves channels across groups via a
    reshape-transpose-reshape.

    Parameters:
    ----------
    channels : int
        Number of channels (must be divisible by `groups`).
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle1d, self).__init__()
        assert (channels % groups == 0)
        self.groups = groups

    def forward(self, x):
        batch, channels, seq_len = x.size()
        x = x.view(batch, self.groups, channels // self.groups, seq_len)
        x = x.transpose(1, 2).contiguous()
        return x.view(batch, channels, seq_len)

    def __repr__(self):
        return "{name}(groups={groups})".format(
            name=self.__class__.__name__,
            groups=self.groups)
class DwsConvBlock1d(nn.Module):
    """
    Depthwise-separable masked 1D convolution block: a depthwise convolution
    followed by a pointwise one, with optional channel shuffle, batch
    normalization, activation and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    stride : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups (for the pointwise convolution).
    bias : bool, default False
        Whether the layers use a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True)),
                 dropout_rate=0.0):
        super(DwsConvBlock1d, self).__init__()
        self.activate = activation is not None
        self.use_bn = use_bn
        self.use_dropout = dropout_rate != 0.0
        self.use_channel_shuffle = groups > 1
        # Depthwise stage: one filter per input channel.
        self.dw_conv = MaskConv1d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias)
        # Pointwise stage: mixes channels (optionally grouped).
        self.pw_conv = mask_conv1d1(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=groups,
            bias=bias)
        if self.use_channel_shuffle:
            self.shuffle = ChannelShuffle1d(channels=out_channels, groups=groups)
        if self.use_bn:
            self.bn = nn.BatchNorm1d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = activation()
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x, x_len):
        x, x_len = self.dw_conv(x, x_len)
        x, x_len = self.pw_conv(x, x_len)
        if self.use_channel_shuffle:
            x = self.shuffle(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x, x_len
class JasperUnit(nn.Module):
    """
    Jasper unit with residual connection.

    Parameters:
    ----------
    in_channels : int or list of int
        Number of input channels. With the dense residual scheme this is the list
        of widths of all preceding unit outputs.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bn_eps,
                 dropout_rate,
                 repeat,
                 use_dw,
                 use_dr):
        super(JasperUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        self.use_dr = use_dr
        block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        if self.use_dr:
            # Dense residual: one 1x1 projection per preceding unit output; their
            # results are summed with the body output in forward().
            self.identity_block = DualPathParallelConcurent()
            for i, dense_in_channels_i in enumerate(in_channels):
                self.identity_block.add_module("block{}".format(i + 1), mask_conv1d1_block(
                    in_channels=dense_in_channels_i,
                    out_channels=out_channels,
                    bn_eps=bn_eps,
                    dropout_rate=0.0,
                    activation=None))
            # The body itself only consumes the immediately preceding unit's output.
            in_channels = in_channels[-1]
        else:
            self.identity_block = mask_conv1d1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_eps=bn_eps,
                dropout_rate=0.0,
                activation=None)
        self.body = DualPathSequential()
        for i in range(repeat):
            # The last body block omits activation/dropout; they are applied after
            # the residual addition instead.
            activation = (lambda: nn.ReLU(inplace=True)) if i < repeat - 1 else None
            dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0
            self.body.add_module("block{}".format(i + 1), block_class(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=1,
                padding=(kernel_size // 2),
                bn_eps=bn_eps,
                dropout_rate=dropout_rate_i,
                activation=activation))
            in_channels = out_channels
        self.activ = nn.ReLU(inplace=True)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x, x_len):
        if self.use_dr:
            # In dense-residual mode the "length" argument carries extra state:
            # (x_len, list of previous unit outputs, list of their lengths).
            x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None)
            y = [x] if y is None else y + [x]
            y_len = [x_len] if y_len is None else y_len + [x_len]
            # Project every stored output to out_channels and sum them.
            identity, _ = self.identity_block(y, y_len)
            identity = torch.stack(tuple(identity), dim=1)
            identity = identity.sum(dim=1)
        else:
            identity, _ = self.identity_block(x, x_len)
        x, x_len = self.body(x, x_len)
        x = x + identity
        x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        if self.use_dr:
            # Propagate the accumulated outputs/lengths to the next unit.
            return x, (x_len, y, y_len)
        else:
            return x, x_len
class JasperFinalBlock(nn.Module):
    """
    Final block of the Jasper network: a dilated masked convolution followed by a
    plain masked convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels : list of int
        Number of output channels for each block.
    kernel_sizes : list of int
        Kernel sizes for each block.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rates : list of int
        Dropout rates for each block.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 kernel_sizes,
                 bn_eps,
                 dropout_rates,
                 use_dw,
                 use_dr):
        super(JasperFinalBlock, self).__init__()
        self.use_dr = use_dr
        conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        self.conv1 = conv1_class(
            in_channels=in_channels,
            out_channels=channels[-2],
            kernel_size=kernel_sizes[-2],
            stride=1,
            padding=(2 * kernel_sizes[-2] // 2 - 1),
            dilation=2,
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[-2])
        self.conv2 = MaskConvBlock1d(
            in_channels=channels[-2],
            out_channels=channels[-1],
            kernel_size=kernel_sizes[-1],
            stride=1,
            padding=(kernel_sizes[-1] // 2),
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[-1])

    def forward(self, x, x_len):
        if self.use_dr:
            # Dense-residual units pass (x_len, y, y_len); keep the lengths only.
            x_len = x_len[0]
        x, x_len = self.conv1(x, x_len)
        return self.conv2(x, x_len)
class Jasper(nn.Module):
    """
    Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit and initial/final block.
    kernel_sizes : list of int
        Kernel sizes for each unit and initial/final block.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rates : list of int
        Dropout rates for each unit and initial/final block.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    from_audio : bool, default True
        Whether to treat input as audio instead of Mel-specs.
    dither : float, default 0.0
        Amount of white-noise dithering.
    return_text : bool, default False
        Whether to return text instead of logits.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    in_channels : int, default 64
        Number of input channels (audio features).
    num_classes : int, default 29
        Number of classification classes (number of graphemes).
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 bn_eps,
                 dropout_rates,
                 repeat,
                 use_dw,
                 use_dr,
                 from_audio=True,
                 dither=0.0,
                 return_text=False,
                 vocabulary=None,
                 in_channels=64,
                 num_classes=29):
        super(Jasper, self).__init__()
        self.in_size = in_channels
        self.num_classes = num_classes
        self.vocabulary = vocabulary
        self.from_audio = from_audio
        self.return_text = return_text
        if self.from_audio:
            self.preprocessor = NemoMelSpecExtractor(dither=dither)
        self.features = DualPathSequential()
        init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        self.features.add_module("init_block", init_block_class(
            in_channels=in_channels,
            out_channels=channels[0],
            kernel_size=kernel_sizes[0],
            stride=2,
            padding=(kernel_sizes[0] // 2),
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[0]))
        in_channels = channels[0]
        in_channels_list = []
        # Middle units; with the dense residual (DR) scheme each unit additionally
        # receives the channel widths of all preceding units.
        for i, (out_channels, kernel_size, dropout_rate) in\
                enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])):
            in_channels_list += [in_channels]
            self.features.add_module("unit{}".format(i + 1), JasperUnit(
                in_channels=(in_channels_list if use_dr else in_channels),
                out_channels=out_channels,
                kernel_size=kernel_size,
                bn_eps=bn_eps,
                dropout_rate=dropout_rate,
                repeat=repeat,
                use_dw=use_dw,
                use_dr=use_dr))
            in_channels = out_channels
        self.features.add_module("final_block", JasperFinalBlock(
            in_channels=in_channels,
            channels=channels,
            kernel_sizes=kernel_sizes,
            bn_eps=bn_eps,
            dropout_rates=dropout_rates,
            use_dw=use_dw,
            use_dr=use_dr))
        in_channels = channels[-1]
        # 1x1 convolution producing per-frame grapheme logits.
        self.output = conv1d1(
            in_channels=in_channels,
            out_channels=num_classes,
            bias=True)
        if self.return_text:
            self.ctc_decoder = CtcDecoder(vocabulary=vocabulary)
        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            # Bug fix: this network is built from 1D convolutions (MaskConv1d is a
            # subclass of nn.Conv1d); the original checked nn.Conv2d, so the Kaiming
            # initialization was never applied to any layer.
            if isinstance(module, nn.Conv1d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x, x_len=None):
        """
        Run the acoustic model.

        Parameters:
        ----------
        x : tensor or (tensor, tensor)
            Audio waveforms (if `from_audio`) or Mel-spec features; may be a pair
            (x, x_len) when `x_len` is omitted.
        x_len : tensor or None, default None
            Valid lengths for each batch item.

        Returns:
        -------
        (tensor, tensor) or list of str
            Per-frame logits with updated lengths, or decoded text when
            `return_text` is set.
        """
        if x_len is None:
            assert (type(x) in (list, tuple))
            x, x_len = x
        if self.from_audio:
            x, x_len = self.preprocessor(x, x_len)
        x, x_len = self.features(x, x_len)
        x = self.output(x)
        if self.return_text:
            greedy_predictions = x.transpose(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).cpu().numpy()
            return self.ctc_decoder(greedy_predictions)
        else:
            return x, x_len
def get_jasper(version,
               use_dw=False,
               use_dr=False,
               bn_eps=1e-3,
               vocabulary=None,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create a Jasper/DR/QuartzNet model with specific parameters.

    Parameters:
    ----------
    version : tuple of str
        Model type and configuration, e.g. ("jasper", "10x5").
    use_dw : bool, default False
        Whether to use depthwise block.
    use_dr : bool, default False
        Whether to use dense residual scheme.
    bn_eps : float, default 1e-3
        Small float added to variance in Batch norm.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    model_type, size_spec = version
    blocks, repeat = tuple(map(int, size_spec.split("x")))
    main_stage_repeat = blocks // 5
    if model_type == "jasper":
        channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
        kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
        dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
    elif model_type == "quartznet":
        channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024]
        kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1]
        dropout_rates_per_stage = [0.0] * 8
    else:
        raise ValueError("Unsupported Jasper family model type: {}".format(model_type))
    # The five main stages (indices 1..5) are repeated; init/final stages occur once.
    stage_repeat = [1] * 8
    for idx in range(1, 6):
        stage_repeat[idx] = main_stage_repeat
    channels = [c for (c, r) in zip(channels_per_stage, stage_repeat) for _ in range(r)]
    kernel_sizes = [k for (k, r) in zip(kernel_sizes_per_stage, stage_repeat) for _ in range(r)]
    dropout_rates = [d for (d, r) in zip(dropout_rates_per_stage, stage_repeat) for _ in range(r)]
    net = Jasper(
        channels=channels,
        kernel_sizes=kernel_sizes,
        bn_eps=bn_eps,
        dropout_rates=dropout_rates,
        repeat=repeat,
        use_dw=use_dw,
        use_dr=use_dr,
        vocabulary=vocabulary,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def jasper5x3(**kwargs):
    """
    Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "5x3"),
        model_name="jasper5x3",
        **kwargs)
def jasper10x4(**kwargs):
    """
    Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "10x4"),
        model_name="jasper10x4",
        **kwargs)
def jasper10x5(**kwargs):
    """
    Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "10x5"),
        model_name="jasper10x5",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build each Jasper variant, check parameter counts and output shapes."""
    import torch
    pretrained = False
    from_audio = True
    audio_features = 64
    num_classes = 29
    # Bug fix: only use CUDA when it is actually available, so the smoke test also
    # runs on CPU-only machines (the original hard-coded use_cuda = True and then
    # unconditionally called net.cuda()).
    use_cuda = torch.cuda.is_available()
    models = [
        jasper5x3,
        jasper10x4,
        jasper10x5,
    ]
    for model in models:
        net = model(
            in_channels=audio_features,
            num_classes=num_classes,
            from_audio=from_audio,
            pretrained=pretrained)
        if use_cuda:
            net = net.cuda()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != jasper5x3 or weight_count == 107681053)
        assert (model != jasper10x4 or weight_count == 261393693)
        assert (model != jasper10x5 or weight_count == 322286877)
        batch = 3
        # Raw audio is roughly 640 samples per output frame (hop 160 x stride 2 x 2).
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
        x = torch.randn(x_shape)
        x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device)
        if use_cuda:
            x = x.cuda()
            x_len = x_len.cuda()
        y, y_len = net(x, x_len)
        assert (tuple(y.size())[:2] == (batch, net.num_classes))
        if from_audio:
            assert (y.size()[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.size()[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
# Run the smoke test when the module is executed as a script.
if __name__ == "__main__":
    _test()
| 35,202
| 29.347414
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resneta.py
|
"""
ResNet(A) with average downsampling for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b']
import os
import torch.nn as nn
from .common import conv1x1_block
from .resnet import ResBlock, ResBottleneck
from .senet import SEInitBlock
class ResADownBlock(nn.Module):
    """
    Identity-branch downsampling for ResNet(A): average pooling followed by a
    pointwise convolution without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilation=1):
        super(ResADownBlock, self).__init__()
        # When dilation replaces striding, the pool degenerates to a 1x1 no-op.
        pool_size = stride if dilation == 1 else 1
        self.pool = nn.AvgPool2d(
            kernel_size=pool_size,
            stride=pool_size,
            ceil_mode=True,
            count_include_pad=False)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv(self.pool(x))
class ResAUnit(nn.Module):
    """
    ResNet(A) residual unit: a simple or bottleneck body plus an average-pool
    downsampled identity branch whenever the output shape differs from the input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 bottleneck=True,
                 conv1_stride=False):
        super(ResAUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        if self.resize_identity:
            self.identity_block = ResADownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                dilation=dilation)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_block(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class ResNetA(nn.Module):
    """
    ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    dilated : bool, default False
        Whether to use dilation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 dilated=False,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNetA, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                if dilated:
                    # In the dilated regime only stage 2 downsamples; deeper stages
                    # keep resolution and grow the dilation rate instead.
                    stride = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                    dilation = (2 ** max(0, i - 1 - int(j == 0)))
                else:
                    stride = 2 if (j == 0) and (i != 0) else 1
                    dilation = 1
                stage.add_module("unit{}".format(j + 1), ResAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # modules() instead of named_modules(): the module names were never used.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Compute class logits for a batch of images."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_resneta(blocks,
                bottleneck=None,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create ResNet(A) with average downsampling model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths whose stage layout does not depend on the block type.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks))

    # Sanity check: stage layout must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]

    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the final one, which stays intact.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNetA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resneta10(**kwargs):
    """
    ResNet(A)-10 with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=10, model_name="resneta10", **kwargs)
def resnetabc14b(**kwargs):
    """
    ResNet(A)-BC-14b with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs)
def resneta18(**kwargs):
    """
    ResNet(A)-18 with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=18, model_name="resneta18", **kwargs)
def resneta50b(**kwargs):
    """
    ResNet(A)-50 with average downsampling model with stride at the second convolution in bottleneck block
    from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs)
def resneta101b(**kwargs):
    """
    ResNet(A)-101 with average downsampling model with stride at the second convolution in bottleneck
    block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs)
def resneta152b(**kwargs):
    """
    ResNet(A)-152 with average downsampling model with stride at the second convolution in bottleneck
    block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test every model variant: parameter count and output shape."""
    import torch
    pretrained = False

    # Expected trainable-parameter counts per constructor.
    expected_widths = {
        resneta10: 5438024,
        resnetabc14b: 10084168,
        resneta18: 11708744,
        resneta50b: 25576264,
        resneta101b: 44568392,
        resneta152b: 60212040,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 14,395
| 32.094253
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resnesta.py
|
"""
ResNeSt(A) with average downsampling for ImageNet-1K, implemented in PyTorch.
Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
"""
__all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnesta50', 'resnesta101', 'resnesta152',
'resnesta200', 'resnesta269', 'ResNeStADownBlock']
import os
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, saconv3x3_block
from .senet import SEInitBlock
class ResNeStABlock(nn.Module):
    """
    Simple ResNeSt(A) block for residual path in ResNeSt(A) unit.

    A 3x3 convolution, an optional average-pool downsampling step, then a
    split-attention 3x3 convolution without a final activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bias=False,
                 use_bn=True):
        super(ResNeStABlock, self).__init__()
        # Spatial downsampling is delegated to pooling between the convolutions.
        self.resize = (stride > 1)

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn)
        if self.resize:
            self.pool = nn.AvgPool2d(
                kernel_size=3,
                stride=stride,
                padding=1)
        self.conv2 = saconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            activation=None)

    def forward(self, x):
        y = self.conv1(x)
        if self.resize:
            y = self.pool(y)
        return self.conv2(y)
class ResNeStABottleneck(nn.Module):
    """
    ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit.

    1x1 reduce -> split-attention 3x3 -> optional average-pool downsampling ->
    1x1 expand (no activation on the last convolution).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck_factor=4):
        super(ResNeStABottleneck, self).__init__()
        self.resize = (stride > 1)
        # Inner width of the bottleneck.
        mid_channels = out_channels // bottleneck_factor

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = saconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels)
        if self.resize:
            self.pool = nn.AvgPool2d(
                kernel_size=3,
                stride=stride,
                padding=1)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        y = self.conv2(self.conv1(x))
        if self.resize:
            y = self.pool(y)
        return self.conv3(y)
class ResNeStADownBlock(nn.Module):
    """
    ResNeSt(A) downsample block for the identity branch of a residual unit.

    Average-pools the input spatially and projects channels with a 1x1
    convolution (no activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(ResNeStADownBlock, self).__init__()
        self.pool = nn.AvgPool2d(
            kernel_size=stride,
            stride=stride,
            ceil_mode=True,
            count_include_pad=False)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv(self.pool(x))
class ResNeStAUnit(nn.Module):
    """
    ResNeSt(A) unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck=True):
        super(ResNeStAUnit, self).__init__()
        # The identity path needs resizing if either shape aspect changes.
        self.resize_identity = (stride != 1) or (in_channels != out_channels)

        body_cls = ResNeStABottleneck if bottleneck else ResNeStABlock
        self.body = body_cls(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        if self.resize_identity:
            self.identity_block = ResNeStADownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_block(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class ResNeStA(nn.Module):
    """
    ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNeStA, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of every stage except the first downsamples.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), ResNeStAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Sequential()
        if dropout_rate > 0.0:
            self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        # modules() instead of named_modules(): the module names were never used.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Compute class logits for a batch of images."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_resnesta(blocks,
                 bottleneck=None,
                 width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create ResNeSt(A) with average downsampling model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths whose stage layout does not depend on the block type.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks))

    # Sanity check: stage layout must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]

    if blocks >= 101:
        # Deeper variants use a doubled stem width.
        init_block_channels *= 2

    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the final one, which stays intact.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNeStA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resnestabc14(**kwargs):
    """
    ResNeSt(A)-BC-14 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=14, bottleneck=True, model_name="resnestabc14", **kwargs)
def resnesta18(**kwargs):
    """
    ResNeSt(A)-18 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=18, model_name="resnesta18", **kwargs)
def resnestabc26(**kwargs):
    """
    ResNeSt(A)-BC-26 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=26, bottleneck=True, model_name="resnestabc26", **kwargs)
def resnesta50(**kwargs):
    """
    ResNeSt(A)-50 with average downsampling model with stride at the second convolution in bottleneck block
    from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=50, model_name="resnesta50", **kwargs)
def resnesta101(**kwargs):
    """
    ResNeSt(A)-101 with average downsampling model with stride at the second convolution in bottleneck
    block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=101, model_name="resnesta101", **kwargs)
def resnesta152(**kwargs):
    """
    ResNeSt(A)-152 with average downsampling model with stride at the second convolution in bottleneck
    block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=152, model_name="resnesta152", **kwargs)
def resnesta200(in_size=(256, 256), **kwargs):
    """
    ResNeSt(A)-200 with average downsampling model with stride at the second convolution in bottleneck
    block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    in_size : tuple of two ints, default (256, 256)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=200, in_size=in_size, dropout_rate=0.2, model_name="resnesta200", **kwargs)
def resnesta269(in_size=(320, 320), **kwargs):
    """
    ResNeSt(A)-269 with average downsampling model with stride at the second convolution in bottleneck
    block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    in_size : tuple of two ints, default (320, 320)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    Returns:
    -------
    nn.Module
        The constructed network.
    """
    return get_resnesta(blocks=269, in_size=in_size, dropout_rate=0.2, model_name="resnesta269", **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test every model variant: parameter count and output shape."""
    import torch
    pretrained = False

    # (constructor, input resolution, expected trainable-parameter count)
    configs = [
        (resnestabc14, 224, 10611688),
        (resnesta18, 224, 12763784),
        (resnestabc26, 224, 17069448),
        (resnesta50, 224, 27483240),
        (resnesta101, 224, 48275016),
        (resnesta152, 224, 65316040),
        (resnesta200, 256, 70201544),
        (resnesta269, 320, 110929480),
    ]
    for model, size, expected in configs:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        batch = 14
        x = torch.randn(batch, 3, size, size)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (batch, 1000))


if __name__ == "__main__":
    _test()
| 17,572
| 30.892922
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/senet.py
|
"""
SENet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock']
import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, SEBlock
class SENetBottleneck(nn.Module):
    """
    SENet bottleneck block for residual path in SENet unit.

    1x1 reduce -> grouped 3x3 (carries the stride) -> 1x1 expand without
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width):
        super(SENetBottleneck, self).__init__()
        mid_channels = out_channels // 4
        # ResNeXt-style group width; SENet halves it for the first convolution.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        group_width2 = group_width // 2

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width2)
        self.conv2 = conv3x3_block(
            in_channels=group_width2,
            out_channels=group_width,
            stride=stride,
            groups=cardinality)
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class SENetUnit(nn.Module):
    """
    SENet unit.

    Bottleneck body followed by channel-wise Squeeze-and-Excitation
    recalibration, added to a (possibly projected) identity branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    identity_conv3x3 : bool, default False
        Whether to use 3x3 convolution in the identity link.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 identity_conv3x3):
        super(SENetUnit, self).__init__()
        # The identity path needs resizing if either shape aspect changes.
        self.resize_identity = (stride != 1) or (in_channels != out_channels)

        self.body = SENetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            conv_block = conv3x3_block if identity_conv3x3 else conv1x1_block
            self.identity_conv = conv_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        y = self.se(self.body(x))
        return self.activ(y + identity)
class SEInitBlock(nn.Module):
    """
    SENet specific initial block.

    A deep three-convolution stem (stride-2 first conv) followed by a
    stride-2 max-pool, giving an overall 4x spatial reduction.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(SEInitBlock, self).__init__()
        mid_channels = out_channels // 2

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.conv3(self.conv2(self.conv1(x))))
class SENet(nn.Module):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SENet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            # The first stage keeps a 1x1 identity projection; later stages use 3x3.
            identity_conv3x3 = (i != 0)
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), SENetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    identity_conv3x3=identity_conv3x3))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Global average pooling. Equivalent to the former fixed
        # AvgPool2d(kernel_size=7) on 224x224 inputs (7x7 final feature map),
        # but also supports other input resolutions, matching the sibling
        # ResNet(A)/ResNeSt(A) models.
        self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1))

        self.output = nn.Sequential()
        self.output.add_module("dropout", nn.Dropout(p=0.2))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        # modules() instead of named_modules(): the module names were never used.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Compute class logits for a batch of images."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_senet(blocks,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create SENet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-depth stage layout and group count (SENet-154 uses wider groups).
    configs = {
        16: ([1, 1, 1, 1], 32),
        28: ([2, 2, 2, 2], 32),
        40: ([3, 3, 3, 3], 32),
        52: ([3, 4, 6, 3], 32),
        103: ([3, 4, 23, 3], 32),
        154: ([3, 8, 36, 3], 64),
    }
    if blocks not in configs:
        raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks))
    layers, cardinality = configs[blocks]

    bottleneck_width = 4
    init_block_channels = 128
    channels_per_layers = [256, 512, 1024, 2048]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SENet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def senet16(**kwargs):
"""
SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_senet(blocks=16, model_name="senet16", **kwargs)
def senet28(**kwargs):
"""
SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_senet(blocks=28, model_name="senet28", **kwargs)
def senet40(**kwargs):
"""
SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_senet(blocks=40, model_name="senet40", **kwargs)
def senet52(**kwargs):
"""
SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_senet(blocks=52, model_name="senet52", **kwargs)
def senet103(**kwargs):
    """
    Builds the 103-layer SENet from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_senet(model_name="senet103", blocks=103, **kwargs)
def senet154(**kwargs):
    """
    Builds the 154-layer SENet from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_senet(model_name="senet154", blocks=154, **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test each SENet variant: parameter count, forward pass, backward pass."""
    import torch
    pretrained = False
    # Model constructor -> expected trainable-parameter count.
    expected_widths = {
        senet16: 31366168,
        senet28: 36453768,
        senet40: 41541368,
        senet52: 44659416,
        senet103: 60963096,
        senet154: 115088984,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
if __name__ == "__main__":
    # Run the smoke tests when this module is executed directly.
    _test()
| 13,095
| 28.696145
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/diapreresnet_cifar.py
|
"""
DIA-PreResNet for CIFAR/SVHN, implemented in PyTorch.
Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAPreResNet', 'diapreresnet20_cifar10', 'diapreresnet20_cifar100', 'diapreresnet20_svhn',
'diapreresnet56_cifar10', 'diapreresnet56_cifar100', 'diapreresnet56_svhn', 'diapreresnet110_cifar10',
'diapreresnet110_cifar100', 'diapreresnet110_svhn', 'diapreresnet164bn_cifar10',
'diapreresnet164bn_cifar100', 'diapreresnet164bn_svhn', 'diapreresnet1001_cifar10',
'diapreresnet1001_cifar100', 'diapreresnet1001_svhn', 'diapreresnet1202_cifar10',
'diapreresnet1202_cifar100', 'diapreresnet1202_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3, DualPathSequential
from .preresnet import PreResActivation
from .diaresnet import DIAAttention
from .diapreresnet import DIAPreResUnit
class CIFARDIAPreResNet(nn.Module):
    """
    DIA-PreResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARDIAPreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Feature extractor: a plain 3x3 stem followed by the residual stages.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # return_two=False: the stage returns only the feature tensor,
            # not the (x, h) pair threaded through the DIA units.
            stage = DualPathSequential(return_two=False)
            # One DIA attention module is created per stage and shared by
            # every unit in that stage.
            attention = DIAAttention(
                in_x_features=channels_per_stage[0],
                in_h_features=channels_per_stage[0])
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), DIAPreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    attention=attention))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Pre-activation networks need a final BN+ReLU after the last unit.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # 8x8 average pooling collapses the 32x32-input feature map to 1x1.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # He-uniform initialization for every conv weight; zero the biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        # (N, C, H, W) -> features -> flatten -> class logits of shape (N, num_classes).
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_diapreresnet_cifar(num_classes,
                           blocks,
                           bottleneck,
                           model_name=None,
                           pretrained=False,
                           root=os.path.join("~", ".torch", "models"),
                           **kwargs):
    """
    Builds a DIA-PreResNet for CIFAR/SVHN of the requested depth.
    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Total number of blocks (network depth).
    bottleneck : bool
        Whether units use the bottleneck design.
    model_name : str or None, default None
        Model name used to locate pretrained weights.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    assert num_classes in (10, 100)
    # Three stages of equal depth; bottleneck units hold 3 convs each, simple units 2.
    divisor = 9 if bottleneck else 6
    assert (blocks - 2) % divisor == 0
    layers = [(blocks - 2) // divisor] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expand the stage width by a factor of 4.
        channels = [[4 * width for width in stage] for stage in channels]
    net = CIFARDIAPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def diapreresnet20_cifar10(num_classes=10, **kwargs):
    """
    Builds the 20-layer DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet20_cifar10",
                                  blocks=20, bottleneck=False, **kwargs)
def diapreresnet20_cifar100(num_classes=100, **kwargs):
    """
    Builds the 20-layer DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet20_cifar100",
                                  blocks=20, bottleneck=False, **kwargs)
def diapreresnet20_svhn(num_classes=10, **kwargs):
    """
    Builds the 20-layer DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet20_svhn",
                                  blocks=20, bottleneck=False, **kwargs)
def diapreresnet56_cifar10(num_classes=10, **kwargs):
    """
    Builds the 56-layer DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet56_cifar10",
                                  blocks=56, bottleneck=False, **kwargs)
def diapreresnet56_cifar100(num_classes=100, **kwargs):
    """
    Builds the 56-layer DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet56_cifar100",
                                  blocks=56, bottleneck=False, **kwargs)
def diapreresnet56_svhn(num_classes=10, **kwargs):
    """
    Builds the 56-layer DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet56_svhn",
                                  blocks=56, bottleneck=False, **kwargs)
def diapreresnet110_cifar10(num_classes=10, **kwargs):
    """
    Builds the 110-layer DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet110_cifar10",
                                  blocks=110, bottleneck=False, **kwargs)
def diapreresnet110_cifar100(num_classes=100, **kwargs):
    """
    Builds the 110-layer DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet110_cifar100",
                                  blocks=110, bottleneck=False, **kwargs)
def diapreresnet110_svhn(num_classes=10, **kwargs):
    """
    Builds the 110-layer DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet110_svhn",
                                  blocks=110, bottleneck=False, **kwargs)
def diapreresnet164bn_cifar10(num_classes=10, **kwargs):
    """
    Builds the 164-layer bottleneck DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet164bn_cifar10",
                                  blocks=164, bottleneck=True, **kwargs)
def diapreresnet164bn_cifar100(num_classes=100, **kwargs):
    """
    Builds the 164-layer bottleneck DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet164bn_cifar100",
                                  blocks=164, bottleneck=True, **kwargs)
def diapreresnet164bn_svhn(num_classes=10, **kwargs):
    """
    Builds the 164-layer bottleneck DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet164bn_svhn",
                                  blocks=164, bottleneck=True, **kwargs)
def diapreresnet1001_cifar10(num_classes=10, **kwargs):
    """
    Builds the 1001-layer DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1001_cifar10",
                                  blocks=1001, bottleneck=True, **kwargs)
def diapreresnet1001_cifar100(num_classes=100, **kwargs):
    """
    Builds the 1001-layer DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1001_cifar100",
                                  blocks=1001, bottleneck=True, **kwargs)
def diapreresnet1001_svhn(num_classes=10, **kwargs):
    """
    Builds the 1001-layer DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1001_svhn",
                                  blocks=1001, bottleneck=True, **kwargs)
def diapreresnet1202_cifar10(num_classes=10, **kwargs):
    """
    Builds the 1202-layer DIA-PreResNet for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1202_cifar10",
                                  blocks=1202, bottleneck=False, **kwargs)
def diapreresnet1202_cifar100(num_classes=100, **kwargs):
    """
    Builds the 1202-layer DIA-PreResNet for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1202_cifar100",
                                  blocks=1202, bottleneck=False, **kwargs)
def diapreresnet1202_svhn(num_classes=10, **kwargs):
    """
    Builds the 1202-layer DIA-PreResNet for SVHN from 'DIANet: Dense-and-Implicit Attention
    Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    num_classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, model_name="diapreresnet1202_svhn",
                                  blocks=1202, bottleneck=False, **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test each DIA-PreResNet variant: parameter count, forward pass, backward pass."""
    import torch
    pretrained = False
    # (model constructor, number of classes, expected trainable-parameter count)
    configs = [
        (diapreresnet20_cifar10, 10, 286674),
        (diapreresnet20_cifar100, 100, 292524),
        (diapreresnet20_svhn, 10, 286674),
        (diapreresnet56_cifar10, 10, 869970),
        (diapreresnet56_cifar100, 100, 875820),
        (diapreresnet56_svhn, 10, 869970),
        (diapreresnet110_cifar10, 10, 1744914),
        (diapreresnet110_cifar100, 100, 1750764),
        (diapreresnet110_svhn, 10, 1744914),
        (diapreresnet164bn_cifar10, 10, 1922106),
        (diapreresnet164bn_cifar100, 100, 1945236),
        (diapreresnet164bn_svhn, 10, 1922106),
        (diapreresnet1001_cifar10, 10, 10546554),
        (diapreresnet1001_cifar100, 100, 10569684),
        (diapreresnet1001_svhn, 10, 10546554),
        (diapreresnet1202_cifar10, 10, 19438226),
        (diapreresnet1202_cifar100, 100, 19444076),
        (diapreresnet1202_svhn, 10, 19438226),
    ]
    for model, num_classes, expected in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, num_classes)
if __name__ == "__main__":
    # Run the smoke tests when this module is executed directly.
    _test()
| 20,604
| 36.327899
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/simplepose_coco.py
|
"""
SimplePose for COCO Keypoint, implemented in PyTorch.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco',
'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco',
'simplepose_resneta152b_coco']
import os
import torch
import torch.nn as nn
from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock
from .resnet import resnet18, resnet50b, resnet101b, resnet152b
from .resneta import resneta50b, resneta101b, resneta152b
class SimplePose(nn.Module):
    """
    SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17):
        super(SimplePose, self).__init__()
        # Only RGB input is supported.
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.backbone = backbone
        # Decoder: a stack of stride-2 deconvolutions that upsample the
        # backbone features, followed by a 1x1 conv producing one heatmap
        # channel per keypoint.
        self.decoder = nn.Sequential()
        in_channels = backbone_out_channels
        for i, out_channels in enumerate(channels):
            self.decoder.add_module("unit{}".format(i + 1), DeconvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=4,
                stride=2,
                padding=1))
            in_channels = out_channels
        self.decoder.add_module("final_block", conv1x1(
            in_channels=in_channels,
            out_channels=keypoints,
            bias=True))
        # Converts heatmaps into per-keypoint coordinate/score predictions.
        self.heatmap_max_det = HeatmapMaxDetBlock()
        self._init_params()
    def _init_params(self):
        # He-uniform initialization for every conv weight; zero the biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        # Returns either the raw heatmaps or the decoded keypoint detections,
        # depending on `return_heatmap`.
        x = self.backbone(x)
        heatmap = self.decoder(x)
        if self.return_heatmap:
            return heatmap
        else:
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_simplepose(backbone,
                   backbone_out_channels,
                   keypoints,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Builds a SimplePose network with the given backbone.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature-extractor trunk.
    backbone_out_channels : int
        Number of channels produced by the backbone.
    keypoints : int
        Number of keypoints to predict.
    model_name : str or None, default None
        Model name used to locate pretrained weights.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    # The decoder always uses three 256-channel deconvolution stages.
    decoder_channels = [256, 256, 256]
    net = SimplePose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        keypoints=keypoints,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet-18 backbone for COCO Keypoint, from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resnet18(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resnet18_coco",
                          backbone_out_channels=512, keypoints=keypoints, **kwargs)
def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet-50b backbone for COCO Keypoint, from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resnet50b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resnet50b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet-101b backbone for COCO Keypoint, from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resnet101b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resnet101b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet-152b backbone for COCO Keypoint, from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resnet152b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resnet152b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet(A)-50b backbone for COCO Keypoint, from 'Simple Baselines
    for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resneta50b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resneta50b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet(A)-101b backbone for COCO Keypoint, from 'Simple Baselines
    for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resneta101b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resneta101b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    Builds SimplePose with a ResNet(A)-152b backbone for COCO Keypoint, from 'Simple Baselines
    for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to download and load pretrained weights.
    root : str, default '~/.torch/models'
        Directory in which model parameters are cached.
    """
    features = resneta152b(pretrained=pretrained_backbone).features
    del features[-1]  # drop the classifier's final pooling stage
    return get_simplepose(backbone=features, model_name="simplepose_resneta152b_coco",
                          backbone_out_channels=2048, keypoints=keypoints, **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters in *net*."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke-test each SimplePose variant: parameter count and forward-pass output shape."""
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False
    # Model constructor -> expected trainable-parameter count.
    expected_widths = {
        simplepose_resnet18_coco: 15376721,
        simplepose_resnet50b_coco: 33999697,
        simplepose_resnet101b_coco: 52991825,
        simplepose_resnet152b_coco: 68635473,
        simplepose_resneta50b_coco: 34018929,
        simplepose_resneta101b_coco: 53011057,
        simplepose_resneta152b_coco: 68654705,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (y.shape[0] == batch) and (y.shape[1] == keypoints)
        if return_heatmap:
            # Heatmaps are predicted at 1/4 of the input resolution.
            assert (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)
        else:
            # Decoded keypoints carry (x, y, score) per point.
            assert (y.shape[2] == 3)
if __name__ == "__main__":
    # Run the smoke tests when this module is executed directly.
    _test()
| 12,777
| 36.145349
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/vovnet.py
|
"""
VoVNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
"""
__all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57']
import os
import torch.nn as nn
from .common import conv1x1_block, conv3x3_block, SequentialConcurrent
class VoVUnit(nn.Module):
    """
    One VoVNet unit: an optional downsampling max-pool, a chain of 3x3 conv branches whose
    outputs are concatenated (together with the unit input) and fused by a 1x1 conv, plus an
    optional residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    branch_channels : int
        Number of output channels for each branch.
    num_branches : int
        Number of branches.
    resize : bool
        Whether to downsample the input with a max-pool first.
    use_residual : bool
        Whether to add a residual (identity) connection.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 branch_channels,
                 num_branches,
                 resize,
                 use_residual):
        super(VoVUnit, self).__init__()
        self.resize = resize
        self.use_residual = use_residual
        if resize:
            # Stride-2 pooling halves the spatial resolution at stage boundaries.
            self.pool = nn.MaxPool2d(
                kernel_size=3,
                stride=2,
                ceil_mode=True)
        # Sequential chain of 3x3 conv blocks; intermediate outputs are concatenated.
        self.branches = SequentialConcurrent()
        prev_channels = in_channels
        for branch_id in range(num_branches):
            self.branches.add_module("branch{}".format(branch_id + 1), conv3x3_block(
                in_channels=prev_channels,
                out_channels=branch_channels))
            prev_channels = branch_channels
        # 1x1 conv that fuses the concatenated feature maps down to out_channels.
        self.concat_conv = conv1x1_block(
            in_channels=(in_channels + num_branches * branch_channels),
            out_channels=out_channels)
    def forward(self, x):
        if self.resize:
            x = self.pool(x)
        residual = x if self.use_residual else None
        x = self.concat_conv(self.branches(x))
        if residual is not None:
            x = x + residual
        return x
class VoVInitBlock(nn.Module):
    """
    VoVNet stem: three 3x3 conv blocks reducing spatial resolution by a factor of four.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(VoVInitBlock, self).__init__()
        mid_channels = out_channels // 2
        # conv1 and conv3 are strided (each halves the resolution); conv2 keeps it.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            stride=2)
    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class VoVNet(nn.Module):
    """
    VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
    https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    branch_channels : list of list of int
        Number of branch output channels for each unit.
    num_branches : int
        Number of branches for the each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 branch_channels,
                 num_branches,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(VoVNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        init_block_channels = 128

        self.features = nn.Sequential()
        self.features.add_module("init_block", VoVInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # All units except the first of a stage use a residual
                # connection; the first unit of every stage but the first
                # downsamples.
                use_residual = (j != 0)
                resize = (j == 0) and (i != 0)
                stage.add_module("unit{}".format(j + 1), VoVUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    branch_channels=branch_channels[i][j],
                    num_branches=num_branches,
                    resize=resize,
                    use_residual=use_residual))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # BUGFIX: `named_modules()` yields (name, module) tuples. The original
        # loop iterated the tuples directly, so the isinstance checks below
        # never matched and no weight was actually initialized. Unpacking the
        # tuple (as the sibling models in this package do) restores the
        # intended initialization.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_vovnet(blocks,
               slim=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create VoVNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (27, 39 or 57).
    slim : bool, default False
        Whether to use a slim model (all channel counts halved).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VoVNet
        The requested network.
    """
    if blocks == 27:
        layers = [1, 1, 1, 1]
    elif blocks == 39:
        layers = [1, 1, 2, 2]
    elif blocks == 57:
        layers = [1, 1, 4, 3]
    else:
        raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks))
    # Sanity check: each unit counts as 6 layers (5 branch convs + the concat
    # conv, since num_branches is 5 below) and the initial block contributes 3.
    assert (sum(layers) * 6 + 3 == blocks)
    num_branches = 5
    channels_per_layers = [256, 512, 768, 1024]
    branch_channels_per_layers = [128, 160, 192, 224]
    if slim:
        # The slim variant halves every width.
        channels_per_layers = [ci // 2 for ci in channels_per_layers]
        branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers]
    # Expand per-stage widths into per-unit widths.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)]
    net = VoVNet(
        channels=channels,
        branch_channels=branch_channels,
        num_branches=num_branches,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def vovnet27s(**kwargs):
    """
    VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object
    Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_vovnet(blocks=27, slim=True, model_name="vovnet27s", **kwargs)
    return net
def vovnet39(**kwargs):
    """
    VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
    https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_vovnet(blocks=39, model_name="vovnet39", **kwargs)
    return net
def vovnet57(**kwargs):
    """
    VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
    https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_vovnet(blocks=57, model_name="vovnet57", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build each variant, check its parameter count, and run a
    # forward/backward pass on a single ImageNet-sized input.
    import torch

    pretrained = False

    expected_widths = {
        vovnet27s: 3525736,
        vovnet39: 22600296,
        vovnet57: 36640296,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 10,220
| 29.601796
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/espnetv2.py
|
"""
ESPNetv2 for ImageNet-1K, implemented in PyTorch.
Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
https://arxiv.org/abs/1811.11431.
"""
__all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2']
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3, conv1x1_block, conv3x3_block, DualPathSequential
class PreActivation(nn.Module):
    """
    PreResNet-style pure pre-activation block (BatchNorm followed by PReLU),
    without any convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(PreActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.PReLU(num_parameters=in_channels)

    def forward(self, x):
        return self.activ(self.bn(x))
class ShortcutBlock(nn.Module):
    """
    ESPNetv2 shortcut block: a PReLU-activated 3x3 conv block followed by a
    non-activated 1x1 conv block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ShortcutBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            activation=(lambda: nn.PReLU(in_channels)))
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class HierarchicalConcurrent(nn.Sequential):
    """
    A container that feeds the same input to every child module, adds each
    child's output (in place) to the previous child's result, and concatenates
    all the accumulated tensors along the given axis.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, axis=1):
        super(HierarchicalConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x):
        outs = []
        prev = None
        for branch in self._modules.values():
            cur = branch(x)
            if prev is not None:
                # In-place accumulation, matching the reference behavior.
                cur += prev
            outs.append(cur)
            prev = cur
        return torch.cat(tuple(outs), dim=self.axis)
class ESPBlock(nn.Module):
    """
    ESPNetv2 block (so-called EESP block): a grouped 1x1 reduction, a set of
    dilated depthwise 3x3 branches combined hierarchically, pre-activation, a
    grouped 1x1 merge, and (for stride 1) a residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    dilations : list of int
        Dilation values for branches.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilations):
        super(ESPBlock, self).__init__()
        num_branches = len(dilations)
        assert (out_channels % num_branches == 0)
        self.downsample = (stride != 1)
        mid_channels = out_channels // num_branches

        self.reduce_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=num_branches,
            activation=(lambda: nn.PReLU(mid_channels)))

        self.branches = HierarchicalConcurrent()
        for i, dilation in enumerate(dilations):
            # padding == dilation keeps the spatial size (up to the stride).
            self.branches.add_module("branch{}".format(i + 1), conv3x3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                padding=dilation,
                dilation=dilation,
                groups=mid_channels))

        self.merge_conv = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            groups=num_branches,
            activation=None)
        self.preactiv = PreActivation(in_channels=out_channels)
        if not self.downsample:
            self.activ = nn.PReLU(out_channels)

    def forward(self, x, x0):
        y = self.reduce_conv(x)
        y = self.branches(y)
        y = self.preactiv(y)
        y = self.merge_conv(y)
        if not self.downsample:
            y = y + x
            y = self.activ(y)
        # x0 (the raw-image shortcut path) is passed through untouched.
        return y, x0
class DownsampleBlock(nn.Module):
    """
    ESPNetv2 downsample block: average pooling in parallel with a strided EESP
    block (outputs concatenated), plus a shortcut branch computed from the
    pooled raw input image.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    x0_channels : int
        Number of input channels for shortcut.
    dilations : list of int
        Dilation values for branches in EESP block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 x0_channels,
                 dilations):
        super(DownsampleBlock, self).__init__()
        # The EESP branch only produces the extra channels; the pooled input
        # supplies the remaining `in_channels` after concatenation.
        inc_channels = out_channels - in_channels
        self.pool = nn.AvgPool2d(
            kernel_size=3,
            stride=2,
            padding=1)
        self.eesp = ESPBlock(
            in_channels=in_channels,
            out_channels=inc_channels,
            stride=2,
            dilations=dilations)
        self.shortcut_block = ShortcutBlock(
            in_channels=x0_channels,
            out_channels=out_channels)
        self.activ = nn.PReLU(out_channels)

    def forward(self, x, x0):
        pooled = self.pool(x)
        expanded, _ = self.eesp(x, None)
        x = torch.cat((pooled, expanded), dim=1)
        x0 = self.pool(x0)
        x = x + self.shortcut_block(x0)
        return self.activ(x), x0
class ESPInitBlock(nn.Module):
    """
    ESPNetv2 initial block: a stride-2 3x3 conv for the feature path and an
    average pooling step for the raw-image shortcut path.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ESPInitBlock, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            activation=(lambda: nn.PReLU(out_channels)))
        self.pool = nn.AvgPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x, x0):
        return self.conv(x), self.pool(x0)
class ESPFinalBlock(nn.Module):
    """
    ESPNetv2 final block: a depthwise 3x3 conv block followed by a grouped
    1x1 conv block, both PReLU-activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    final_groups : int
        Number of groups in the last convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 final_groups):
        super(ESPFinalBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            groups=in_channels,
            activation=(lambda: nn.PReLU(in_channels)))
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=final_groups,
            activation=(lambda: nn.PReLU(out_channels)))

    def forward(self, x):
        return self.conv2(self.conv1(x))
class ESPNetv2(nn.Module):
    """
    ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
    https://arxiv.org/abs/1811.11431.

    The backbone keeps two parallel paths threaded through the stages by
    `DualPathSequential`: the main feature path and a shortcut path carrying
    (progressively pooled versions of) the raw input image.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    final_block_groups : int
        Number of groups for the final unit.
    dilations : list of list of list of int
        Dilation values for branches in each unit.
    dropout_rate : float, default 0.2
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 final_block_groups,
                 dilations,
                 dropout_rate=0.2,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ESPNetv2, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # The shortcut path carries the raw image, so it keeps the original
        # input channel count throughout.
        x0_channels = in_channels
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=0,
            last_ordinals=2)
        self.features.add_module("init_block", ESPInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = DualPathSequential()
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of each stage downsamples; the rest are
                # stride-1 EESP blocks.
                if j == 0:
                    unit = DownsampleBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        x0_channels=x0_channels,
                        dilations=dilations[i][j])
                else:
                    unit = ESPBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=1,
                        dilations=dilations[i][j])
                stage.add_module("unit{}".format(j + 1), unit)
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", ESPFinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels,
            final_groups=final_block_groups))
        in_channels = final_block_channels
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Sequential()
        self.output.add_module("dropout", nn.Dropout(p=dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))
        self._init_params()

    def _init_params(self):
        # Kaiming-initialize every convolution; other layers keep defaults.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        # The input is fed into both the feature path and the image shortcut
        # path.
        x = self.features(x, x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_espnetv2(width_scale,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create ESPNetv2 model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers (must not exceed 2.0).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    ESPNetv2
        The requested network.
    """
    assert (width_scale <= 2.0)
    branches = 4
    layers = [1, 4, 8, 4]
    # Per-stage maximum dilation: the first unit of each stage uses a larger
    # maximum than the remaining units of that stage.
    max_dilation_list = [6, 5, 4, 3, 2]
    max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1) for (i, li) in enumerate(layers)]
    # For each unit, branch k gets dilation k + 1, and branches past the unit
    # maximum fall back to dilation 1; the list is sorted ascending.
    dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations]
    base_channels = 32
    # Round the scaled base width to a multiple of the branch count so the
    # grouped convolutions divide evenly.
    weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches
    channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))]
    init_block_channels = base_channels if weighed_base_channels > base_channels else weighed_base_channels
    final_block_channels = 1024 if width_scale <= 1.5 else 1280
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = ESPNetv2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        final_block_groups=branches,
        dilations=dilations,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def espnetv2_wd2(**kwargs):
    """
    ESPNetv2 x0.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=0.5, model_name="espnetv2_wd2", **kwargs)
    return net
def espnetv2_w1(**kwargs):
    """
    ESPNetv2 x1.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.0, model_name="espnetv2_w1", **kwargs)
    return net
def espnetv2_w5d4(**kwargs):
    """
    ESPNetv2 x1.25 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.25, model_name="espnetv2_w5d4", **kwargs)
    return net
def espnetv2_w3d2(**kwargs):
    """
    ESPNetv2 x1.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs)
    return net
def espnetv2_w2(**kwargs):
    """
    ESPNetv2 x2.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build each variant, check its parameter count, and run a
    # forward/backward pass on a single ImageNet-sized input.
    import torch

    pretrained = False

    expected_widths = {
        espnetv2_wd2: 1241092,
        espnetv2_w1: 1669592,
        espnetv2_w5d4: 1964832,
        espnetv2_w3d2: 2314120,
        espnetv2_w2: 3497144,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 17,203
| 30.336976
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/shufflenet.py
|
"""
ShuffleNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle
class ShuffleUnit(nn.Module):
    """
    ShuffleNet unit: grouped 1x1 compression, channel shuffle, depthwise 3x3,
    grouped 1x1 expansion, with either a residual (stride 1) or a
    pooled-concat (downsample) shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group):
        super(ShuffleUnit, self).__init__()
        self.downsample = downsample
        # Bottleneck width is a quarter of the *requested* output width,
        # computed before the concat-shortcut adjustment below.
        mid_channels = out_channels // 4
        if downsample:
            # The pooled identity is concatenated on, so the conv path only
            # produces the remaining channels.
            out_channels -= in_channels

        self.compress_conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=(1 if ignore_group else groups))
        self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels)
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups)
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            stride=(2 if downsample else 1))
        self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels)
        self.expand_conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups)
        self.expand_bn3 = nn.BatchNorm2d(num_features=out_channels)
        if downsample:
            self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.activ(self.compress_bn1(self.compress_conv1(x)))
        x = self.c_shuffle(x)
        x = self.dw_bn2(self.dw_conv2(x))
        x = self.expand_bn3(self.expand_conv3(x))
        if self.downsample:
            x = torch.cat((x, self.avgpool(identity)), dim=1)
        else:
            x = x + identity
        return self.activ(x)
class ShuffleInitBlock(nn.Module):
    """
    ShuffleNet specific initial block: stride-2 3x3 convolution with BN/ReLU
    followed by stride-2 max pooling (overall 4x spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ShuffleInitBlock, self).__init__()
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.activ(self.bn(self.conv(x))))
class ShuffleNet(nn.Module):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ShuffleNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(stage_channels):
                # The first unit of each stage downsamples; only the very
                # first unit of the network skips grouping in its compression
                # convolution.
                is_first_unit = (j == 0)
                stage.add_module("unit{}".format(j + 1), ShuffleUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    groups=groups,
                    downsample=is_first_unit,
                    ignore_group=(is_first_unit and (i == 0))))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-initialize every convolution; other layers keep defaults.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.

    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers (1, 2, 3, 4 or 8).
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    ShuffleNet
        The requested network.
    """
    init_block_channels = 24
    layers = [4, 8, 4]

    # Per-stage widths for each supported group count.
    channels_per_layers_map = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in channels_per_layers_map:
        raise ValueError("The {} of groups is not supported".format(groups))
    channels_per_layers = channels_per_layers_map[groups]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)

    net = ShuffleNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def shufflenet_g1_w1(**kwargs):
    """
    ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs)
    return net
def shufflenet_g2_w1(**kwargs):
    """
    ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs)
    return net
def shufflenet_g3_w1(**kwargs):
    """
    ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs)
    return net
def shufflenet_g4_w1(**kwargs):
    """
    ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs)
    return net
def shufflenet_g8_w1(**kwargs):
    """
    ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs)
    return net
def shufflenet_g1_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs)
    return net
def shufflenet_g3_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs)
    return net
def shufflenet_g1_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs)
    return net
def shufflenet_g3_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs)
    return net
def shufflenet_g1_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs)
    return net
def shufflenet_g3_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    # Smoke test: build each variant, check its parameter count, and run a
    # forward/backward pass on a single ImageNet-sized input.
    import torch

    pretrained = False

    expected_widths = {
        shufflenet_g1_w1: 1531936,
        shufflenet_g2_w1: 1733848,
        shufflenet_g3_w1: 1865728,
        shufflenet_g4_w1: 1968344,
        shufflenet_g8_w1: 2434768,
        shufflenet_g1_w3d4: 975214,
        shufflenet_g3_w3d4: 1238266,
        shufflenet_g1_wd2: 534484,
        shufflenet_g3_wd2: 718324,
        shufflenet_g1_wd4: 209746,
        shufflenet_g3_wd4: 305902,
    }

    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 15,779
| 31.875
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/bamresnet.py
|
"""
BAM-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
"""
__all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnet import ResInitBlock, ResUnit
class DenseBlock(nn.Module):
    """
    Standard dense block: a fully-connected layer followed by 1-D batch
    normalization and a ReLU activation.

    Parameters:
    ----------
    in_features : int
        Number of input features.
    out_features : int
        Number of output features.
    """
    def __init__(self,
                 in_features,
                 out_features):
        super(DenseBlock, self).__init__()
        self.fc = nn.Linear(
            in_features=in_features,
            out_features=out_features)
        self.bn = nn.BatchNorm1d(num_features=out_features)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(self.fc(x)))
class ChannelGate(nn.Module):
    """
    BAM channel gate block.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_layers : int, default 1
        Number of dense blocks.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 num_layers=1):
        super(ChannelGate, self).__init__()
        mid_channels = channels // reduction_ratio
        self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.init_fc = DenseBlock(
            in_features=channels,
            out_features=mid_channels)
        # Optional extra FC layers (num_layers - 1 of them) in the bottleneck.
        self.main_fcs = nn.Sequential()
        for i in range(num_layers - 1):
            self.main_fcs.add_module("fc{}".format(i + 1), DenseBlock(
                in_features=mid_channels,
                out_features=mid_channels))
        self.final_fc = nn.Linear(
            in_features=mid_channels,
            out_features=channels)

    def forward(self, x):
        w = self.pool(x)
        w = w.flatten(start_dim=1)
        w = self.init_fc(w)
        w = self.main_fcs(w)
        w = self.final_fc(w)
        # Broadcast the per-channel weights back to the input's spatial size.
        return w.unsqueeze(2).unsqueeze(3).expand_as(x)
class SpatialGate(nn.Module):
    """
    BAM spatial gate block.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_dil_convs : int, default 2
        Number of dilated convolutions.
    dilation : int, default 4
        Dilation/padding value for corresponding convolutions.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 num_dil_convs=2,
                 dilation=4):
        super(SpatialGate, self).__init__()
        mid_channels = channels // reduction_ratio
        self.init_conv = conv1x1_block(
            in_channels=channels,
            out_channels=mid_channels,
            stride=1,
            bias=True)
        # Stack of dilated 3x3 convs to enlarge the receptive field.
        self.dil_convs = nn.Sequential()
        for i in range(num_dil_convs):
            self.dil_convs.add_module("conv{}".format(i + 1), conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=1,
                padding=dilation,
                dilation=dilation,
                bias=True))
        self.final_conv = conv1x1(
            in_channels=mid_channels,
            out_channels=1,
            stride=1,
            bias=True)

    def forward(self, x):
        att = self.init_conv(x)
        att = self.dil_convs(att)
        att = self.final_conv(att)
        # Single-channel spatial map broadcast across all input channels.
        return att.expand_as(x)
class BamBlock(nn.Module):
    """
    BAM attention block for BAM-ResNet: combines the channel and spatial
    gates into a single multiplicative attention map.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    """
    def __init__(self, channels):
        super(BamBlock, self).__init__()
        self.ch_att = ChannelGate(channels=channels)
        self.sp_att = SpatialGate(channels=channels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Attention lies in (1, 2): identity plus a learned soft mask.
        gate = self.sigmoid(self.ch_att(x) * self.sp_att(x))
        return x * (1 + gate)
class BamResUnit(nn.Module):
    """
    BAM-ResNet unit: an optional BAM attention block followed by a ResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck):
        super(BamResUnit, self).__init__()
        # BAM is inserted only where the resolution changes (stride != 1),
        # i.e. at stage boundaries.
        self.use_bam = (stride != 1)
        if self.use_bam:
            self.bam = BamBlock(channels=in_channels)
        self.res_unit = ResUnit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bottleneck=bottleneck,
            conv1_stride=False)

    def forward(self, x):
        if self.use_bam:
            x = self.bam(x)
        return self.res_unit(x)
class BamResNet(nn.Module):
    """
    BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(BamResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first
                # (the init block already reduced the resolution).
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), BamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Fixed 7x7 average pooling matches the default 224x224 input size.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create BAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 18, 34, 50, 101, 152).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    nn.Module
        A network.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is requested without a
        valid `model_name`.
    """
    if blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    else:
        raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks))
    init_block_channels = 64
    # ResNet-18/34 use basic blocks; ResNet-50+ use bottleneck blocks with
    # 4x wider per-stage outputs.
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = BamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def bam_resnet18(**kwargs):
    """
    BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resnet(blocks=18, model_name="bam_resnet18", **kwargs)


def bam_resnet34(**kwargs):
    """
    BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resnet(blocks=34, model_name="bam_resnet34", **kwargs)


def bam_resnet50(**kwargs):
    """
    BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resnet(blocks=50, model_name="bam_resnet50", **kwargs)


def bam_resnet101(**kwargs):
    """
    BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resnet(blocks=101, model_name="bam_resnet101", **kwargs)


def bam_resnet152(**kwargs):
    """
    BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resnet(blocks=152, model_name="bam_resnet152", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all BAM-ResNet variants: parameter counts and output shape."""
    import torch
    pretrained = False
    expected_widths = {
        bam_resnet18: 11712503,
        bam_resnet34: 21820663,
        bam_resnet50: 25915099,
        bam_resnet101: 44907227,
        bam_resnet152: 60550875,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 13,297
| 28.420354
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/resattnet.py
|
"""
ResAttNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904.
"""
__all__ = ['ResAttNet', 'resattnet56', 'resattnet92', 'resattnet128', 'resattnet164', 'resattnet200', 'resattnet236',
'resattnet452']
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .common import conv1x1, conv7x7_block, pre_conv1x1_block, pre_conv3x3_block, Hourglass
class PreResBottleneck(nn.Module):
    """
    PreResNet bottleneck block for the residual path in a PreResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(PreResBottleneck, self).__init__()
        mid_channels = out_channels // 4
        # The first 1x1 conv also returns its pre-activation output, which
        # the enclosing residual block reuses for the identity projection.
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            return_preact=True)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        x, x_pre_activ = self.conv1(x)
        x = self.conv3(self.conv2(x))
        return x, x_pre_activ
class ResBlock(nn.Module):
    """
    Residual block with pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1):
        super(ResBlock, self).__init__()
        # A projection is needed whenever channels or resolution change.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = PreResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)

    def forward(self, x):
        body_out, pre_activ = self.body(x)
        # The projection consumes the body's pre-activation output, as in
        # the original pre-activation ResNet formulation.
        shortcut = self.identity_conv(pre_activ) if self.resize_identity else x
        return body_out + shortcut
class InterpolationBlock(nn.Module):
    """
    Bilinear interpolation block (thin wrapper around ``F.interpolate``).

    Parameters:
    ----------
    scale_factor : float
        Multiplier for spatial size.
    """
    def __init__(self, scale_factor):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor

    def forward(self, x):
        return F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode="bilinear",
            align_corners=True)
class DoubleSkipBlock(nn.Module):
    """
    Skip connection block: adds a residual-block branch onto the identity.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(DoubleSkipBlock, self).__init__()
        self.skip1 = ResBlock(
            in_channels=in_channels,
            out_channels=out_channels)

    def forward(self, x):
        return x + self.skip1(x)
class ResBlockSequence(nn.Module):
    """
    Sequence of residual blocks with pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of sequence.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length):
        super(ResBlockSequence, self).__init__()
        self.blocks = nn.Sequential()
        for i in range(length):
            self.blocks.add_module("block{}".format(i + 1), ResBlock(
                in_channels=in_channels,
                out_channels=out_channels))

    def forward(self, x):
        return self.blocks(x)
class DownAttBlock(nn.Module):
    """
    Down sub-block for the hourglass of an attention block: 2x max-pool
    followed by a sequence of residual blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of residual blocks list.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length):
        super(DownAttBlock, self).__init__()
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)
        self.res_blocks = ResBlockSequence(
            in_channels=in_channels,
            out_channels=out_channels,
            length=length)

    def forward(self, x):
        return self.res_blocks(self.pool(x))
class UpAttBlock(nn.Module):
    """
    Up sub-block for the hourglass of an attention block: a sequence of
    residual blocks followed by bilinear upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of residual blocks list.
    scale_factor : float
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length,
                 scale_factor):
        super(UpAttBlock, self).__init__()
        self.res_blocks = ResBlockSequence(
            in_channels=in_channels,
            out_channels=out_channels,
            length=length)
        self.upsample = InterpolationBlock(scale_factor)

    def forward(self, x):
        return self.upsample(self.res_blocks(x))
class MiddleAttBlock(nn.Module):
    """
    Middle sub-block for an attention block: two 1x1 pre-activated convs
    followed by a sigmoid that produces the soft attention mask.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    """
    def __init__(self, channels):
        super(MiddleAttBlock, self).__init__()
        self.conv1 = pre_conv1x1_block(
            in_channels=channels,
            out_channels=channels)
        self.conv2 = pre_conv1x1_block(
            in_channels=channels,
            out_channels=channels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.conv2(self.conv1(x)))
class AttBlock(nn.Module):
    """
    Attention block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    hourglass_depth : int
        Depth of hourglass block.
    att_scales : list of int
        Attention block specific scales (p, t, r): number of residual blocks
        in the initial part (p), in the first skip branch (t), and in each
        down/up stage (r).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hourglass_depth,
                 att_scales):
        super(AttBlock, self).__init__()
        assert (len(att_scales) == 3)
        scale_factor = 2
        scale_p, scale_t, scale_r = att_scales
        self.init_blocks = ResBlockSequence(
            in_channels=in_channels,
            out_channels=out_channels,
            length=scale_p)
        # Mask branch built as an hourglass: `hourglass_depth` levels of
        # pooling/upsampling with a skip connection per level.
        down_seq = nn.Sequential()
        up_seq = nn.Sequential()
        skip_seq = nn.Sequential()
        for i in range(hourglass_depth):
            down_seq.add_module("down{}".format(i + 1), DownAttBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                length=scale_r))
            up_seq.add_module("up{}".format(i + 1), UpAttBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                length=scale_r,
                scale_factor=scale_factor))
            if i == 0:
                # Outermost skip is the full-resolution trunk branch.
                skip_seq.add_module("skip1", ResBlockSequence(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    length=scale_t))
            else:
                skip_seq.add_module("skip{}".format(i + 1), DoubleSkipBlock(
                    in_channels=in_channels,
                    out_channels=out_channels))
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq,
            return_first_skip=True)
        self.middle_block = MiddleAttBlock(channels=out_channels)
        self.final_block = ResBlock(
            in_channels=in_channels,
            out_channels=out_channels)
    def forward(self, x):
        x = self.init_blocks(x)
        # `y` is the first-skip (trunk) output; `x` feeds the mask branch.
        x, y = self.hg(x)
        x = self.middle_block(x)
        # Residual attention: (1 + mask) * trunk.
        x = (1 + x) * y
        x = self.final_block(x)
        return x
class ResAttInitBlock(nn.Module):
    """
    ResAttNet specific initial block: strided 7x7 conv + 2x max-pool,
    reducing spatial size by 4x overall.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ResAttInitBlock, self).__init__()
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.pool(self.conv(x))
class PreActivation(nn.Module):
    """
    Pre-activation block (BatchNorm + ReLU) without a convolution layer.
    Used by itself as the final block in PreResNet-style networks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self, in_channels):
        super(PreActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class ResAttNet(nn.Module):
    """
    ResAttNet model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    attentions : list of list of int
        Whether to use a attention unit or residual one.
    att_scales : list of int
        Attention block specific scales.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 attentions,
                 att_scales,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResAttNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", ResAttInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # Later (lower-resolution) stages get shallower hourglasses.
            hourglass_depth = len(channels) - 1 - i
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 1 if (i == 0) or (j != 0) else 2
                if attentions[i][j]:
                    stage.add_module("unit{}".format(j + 1), AttBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        hourglass_depth=hourglass_depth,
                        att_scales=att_scales))
                else:
                    stage.add_module("unit{}".format(j + 1), ResBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Final pre-activation (BN + ReLU), as in PreResNet.
        self.features.add_module("post_activ", PreActivation(in_channels=in_channels))
        # Fixed 7x7 average pooling matches the default 224x224 input size.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_resattnet(blocks,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create ResAttNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-depth configuration: (attention units per stage, attention scales).
    cfg = {
        56: ([1, 1, 1], [1, 2, 1]),
        92: ([1, 2, 3], [1, 2, 1]),
        128: ([2, 3, 4], [1, 2, 1]),
        164: ([3, 4, 5], [1, 2, 1]),
        200: ([4, 5, 6], [1, 2, 1]),
        236: ([5, 6, 7], [1, 2, 1]),
        452: ([5, 6, 7], [2, 4, 3]),
    }
    if blocks not in cfg:
        raise ValueError("Unsupported ResAttNet with number of blocks: {}".format(blocks))
    att_layers, att_scales = cfg[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    layers = att_layers + [2]
    channels = [[ci] * (li + 1) for (ci, li) in zip(channels_per_layers, layers)]
    # Each stage starts with a plain residual unit; the final stage uses
    # residual units only.
    attentions = [[0] + [1] * li for li in att_layers] + [[0] * 3]
    net = ResAttNet(
        channels=channels,
        init_block_channels=init_block_channels,
        attentions=attentions,
        att_scales=att_scales,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def resattnet56(**kwargs):
    """
    ResAttNet-56 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=56, model_name="resattnet56", **kwargs)


def resattnet92(**kwargs):
    """
    ResAttNet-92 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=92, model_name="resattnet92", **kwargs)


def resattnet128(**kwargs):
    """
    ResAttNet-128 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=128, model_name="resattnet128", **kwargs)


def resattnet164(**kwargs):
    """
    ResAttNet-164 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=164, model_name="resattnet164", **kwargs)


def resattnet200(**kwargs):
    """
    ResAttNet-200 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=200, model_name="resattnet200", **kwargs)


def resattnet236(**kwargs):
    """
    ResAttNet-236 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=236, model_name="resattnet236", **kwargs)


def resattnet452(**kwargs):
    """
    ResAttNet-452 model from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.torch/models'
        Directory for storing the model parameters.
    """
    return get_resattnet(blocks=452, model_name="resattnet452", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test all ResAttNet variants: parameter counts and output shape."""
    import torch
    pretrained = False
    expected_widths = {
        resattnet56: 31810728,
        resattnet92: 52466344,
        resattnet128: 65294504,
        resattnet164: 78122664,
        resattnet200: 90950824,
        resattnet236: 103778984,
        resattnet452: 182285224,
    }
    for model, expected in expected_widths.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    _test()
| 20,035
| 28.464706
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/centernet.py
|
"""
CenterNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850.
"""
__all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc',
'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco',
'CenterNetHeatmapMaxDet']
import os
import torch
import torch.nn as nn
from .common import conv1x1, conv3x3_block, DeconvBlock, Concurrent
from .resnet import resnet18, resnet50b, resnet101b
class CenterNetDecoderUnit(nn.Module):
    """
    CenterNet decoder unit: 3x3 conv followed by a 2x upsampling
    deconvolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(CenterNetDecoderUnit, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True)
        # 4x4 / stride-2 / pad-1 deconv exactly doubles the spatial size.
        self.deconv = DeconvBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=4,
            stride=2,
            padding=1)

    def forward(self, x):
        return self.deconv(self.conv(x))
class CenterNetHeadBlock(nn.Module):
    """
    CenterNet simple head block: 3x3 conv (no batch norm) followed by a 1x1
    projection to the head's output channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(CenterNetHeadBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            bias=True,
            use_bn=False)
        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class CenterNetHeatmapBlock(nn.Module):
    """
    CenterNet heatmap block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    do_nms : bool
        Whether do NMS (or simply clip for training otherwise).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 do_nms):
        super(CenterNetHeatmapBlock, self).__init__()
        self.do_nms = do_nms
        self.head = CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=out_channels)
        self.sigmoid = nn.Sigmoid()
        if self.do_nms:
            self.pool = nn.MaxPool2d(
                kernel_size=3,
                stride=1,
                padding=1)

    def forward(self, x):
        heatmap = self.sigmoid(self.head(x))
        if self.do_nms:
            # Keep only local maxima: a 3x3 max-pool equals the value itself
            # exactly at peaks, so the mask zeroes everything else.
            peak_mask = (self.pool(heatmap) == heatmap)
            return heatmap * peak_mask
        # Training path: clip away exact 0/1 values.
        eps = 1e-4
        return heatmap.clamp(min=eps, max=(1.0 - eps))
class CenterNetHeatmapMaxDet(nn.Module):
    """
    CenterNet decoder for heads (heatmap, wh, reg).

    Converts the concatenated head output (per-class heatmap, box
    width/height map and center-offset map) into the `topk` detections
    (x1, y1, x2, y2, class_id, score) in input-image coordinates.

    Parameters:
    ----------
    topk : int, default 40
        Keep only `topk` detections.
    scale : int, default is 4
        Downsampling scale factor.
    """
    def __init__(self,
                 topk=40,
                 scale=4):
        super(CenterNetHeatmapMaxDet, self).__init__()
        self.topk = topk
        self.scale = scale

    def forward(self, x):
        # Channel layout produced by the heads: [num_classes | 2 wh | 2 reg].
        heatmap = x[:, :-4]
        wh = x[:, -4:-2]
        reg = x[:, -2:]
        batch, _, out_h, out_w = heatmap.shape
        scores, indices = heatmap.view((batch, -1)).topk(k=self.topk)
        # Use floor division: plain `/` on integer tensors is true division
        # in modern PyTorch and would yield fractional class ids and
        # y-coordinates (legacy torch floored it implicitly).
        topk_classes = (indices // (out_h * out_w)).type(torch.float32)
        topk_indices = indices.fmod(out_h * out_w)
        topk_ys = (topk_indices // out_w).type(torch.float32)
        topk_xs = topk_indices.fmod(out_w).type(torch.float32)
        center = reg.permute(0, 2, 3, 1).view((batch, -1, 2))
        wh = wh.permute(0, 2, 3, 1).view((batch, -1, 2))
        # Sub-pixel center offsets and box sizes at the peak positions.
        xs = torch.gather(center[:, :, 0], dim=-1, index=topk_indices)
        ys = torch.gather(center[:, :, 1], dim=-1, index=topk_indices)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = torch.gather(wh[:, :, 0], dim=-1, index=topk_indices)
        h = torch.gather(wh[:, :, 1], dim=-1, index=topk_indices)
        half_w = 0.5 * w
        half_h = 0.5 * h
        bboxes = torch.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), dim=-1)
        # Map from heatmap resolution back to input-image resolution.
        bboxes = bboxes * self.scale
        topk_classes = topk_classes.unsqueeze(dim=-1)
        scores = scores.unsqueeze(dim=-1)
        result = torch.cat((bboxes, topk_classes, scores), dim=-1)
        return result

    def __repr__(self):
        s = "{name}(topk={topk}, scale={scale})"
        return s.format(
            name=self.__class__.__name__,
            topk=self.topk,
            scale=self.scale)

    def calc_flops(self, x):
        # NOTE(review): `x.size` as an attribute assumes a NumPy-like input
        # (torch tensors expose `size()` as a method) -- confirm the FLOPs
        # counter's calling convention.
        assert (x.shape[0] == 1)
        num_flops = 10 * x.size
        num_macs = 0
        return num_flops, num_macs
class CenterNet(nn.Module):
    """
    CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    topk : int, default 40
        Keep only `topk` detections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (512, 512)
        Spatial size of the expected input image.
    num_classes : int, default 80
        Number of classification classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 topk=40,
                 in_channels=3,
                 in_size=(512, 512),
                 num_classes=80):
        super(CenterNet, self).__init__()
        self.in_size = in_size
        self.in_channels = in_channels
        self.return_heatmap = return_heatmap
        self.backbone = backbone
        self.decoder = nn.Sequential()
        in_channels = backbone_out_channels
        for i, out_channels in enumerate(channels):
            self.decoder.add_module("unit{}".format(i + 1), CenterNetDecoderUnit(
                in_channels=in_channels,
                out_channels=out_channels))
            in_channels = out_channels
        # `Concurrent` collects the three head outputs; CenterNetHeatmapMaxDet
        # expects them channel-concatenated as [heatmap | wh | reg] (it
        # slices x[:, :-4] / x[:, -4:-2] / x[:, -2:]).
        heads = Concurrent()
        # NOTE(review): "heapmap_block" is a typo, but the module name is a
        # state-dict key, so renaming it would break pretrained checkpoints.
        heads.add_module("heapmap_block", CenterNetHeatmapBlock(
            in_channels=in_channels,
            out_channels=num_classes,
            do_nms=(not self.return_heatmap)))
        heads.add_module("wh_block", CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=2))
        heads.add_module("reg_block", CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=2))
        self.decoder.add_module("heads", heads)
        if not self.return_heatmap:
            self.heatmap_max_det = CenterNetHeatmapMaxDet(
                topk=topk,
                scale=4)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for all conv weights; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.backbone(x)
        x = self.decoder(x)
        if not self.return_heatmap:
            x = self.heatmap_max_det(x)
        return x
def get_centernet(backbone,
                  backbone_out_channels,
                  num_classes,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create CenterNet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    num_classes : int
        Number of classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    nn.Module
        A network.
    """
    # Decoder widths are the same for every CenterNet variant in this file.
    net = CenterNet(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=[256, 128, 64],
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def centernet_resnet18_voc(pretrained_backbone=False, num_classes=20, **kwargs):
    """
    CenterNet model on the base of ResNet-18 for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 20
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet18(pretrained=pretrained_backbone).features
    # Drop the final pooling stage so the backbone outputs a spatial feature map.
    del backbone[-1]
    return get_centernet(backbone=backbone, backbone_out_channels=512, num_classes=num_classes,
                         model_name="centernet_resnet18_voc", **kwargs)
def centernet_resnet18_coco(pretrained_backbone=False, num_classes=80, **kwargs):
    """
    CenterNet model on the base of ResNet-18 for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 80
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet18(pretrained=pretrained_backbone).features
    # Drop the final pooling stage so the backbone outputs a spatial feature map.
    del backbone[-1]
    return get_centernet(backbone=backbone, backbone_out_channels=512, num_classes=num_classes,
                         model_name="centernet_resnet18_coco", **kwargs)
def centernet_resnet50b_voc(pretrained_backbone=False, num_classes=20, **kwargs):
    """
    CenterNet model on the base of ResNet-50b for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 20
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet50b(pretrained=pretrained_backbone).features
    # Drop the final pooling stage so the backbone outputs a spatial feature map.
    del backbone[-1]
    return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes,
                         model_name="centernet_resnet50b_voc", **kwargs)
def centernet_resnet50b_coco(pretrained_backbone=False, num_classes=80, **kwargs):
    """
    CenterNet model on the base of ResNet-50b for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 80
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet50b(pretrained=pretrained_backbone).features
    # Drop the final pooling stage so the backbone outputs a spatial feature map.
    del backbone[-1]
    return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes,
                         model_name="centernet_resnet50b_coco", **kwargs)
def centernet_resnet101b_voc(pretrained_backbone=False, num_classes=20, **kwargs):
    """
    CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 20
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet101b(pretrained=pretrained_backbone).features
    del backbone[-1]  # remove final pooling: keep the spatial feature map
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        num_classes=num_classes,
        model_name="centernet_resnet101b_voc",
        **kwargs)
def centernet_resnet101b_coco(pretrained_backbone=False, num_classes=80, **kwargs):
    """
    CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 80
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet101b(pretrained=pretrained_backbone).features
    del backbone[-1]  # remove final pooling: keep the spatial feature map
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        num_classes=num_classes,
        model_name="centernet_resnet101b_coco",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke test: build every CenterNet variant, check parameter counts and output shape."""
    in_size = (512, 512)
    topk = 40
    return_heatmap = False
    pretrained = False
    models = [
        (centernet_resnet18_voc, 20),
        (centernet_resnet18_coco, 80),
        (centernet_resnet50b_voc, 20),
        (centernet_resnet50b_coco, 80),
        (centernet_resnet101b_voc, 20),
        (centernet_resnet101b_coco, 80),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts are per-model; the `!= ... or` form only
        # checks the count for the model actually under test.
        assert (model != centernet_resnet18_voc or weight_count == 14215640)
        assert (model != centernet_resnet18_coco or weight_count == 14219540)
        assert (model != centernet_resnet50b_voc or weight_count == 30086104)
        assert (model != centernet_resnet50b_coco or weight_count == 30090004)
        assert (model != centernet_resnet101b_voc or weight_count == 49078232)
        assert (model != centernet_resnet101b_coco or weight_count == 49082132)
        batch = 14
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        assert (y.shape[0] == batch)
        # Heatmap output is at 1/4 input resolution; detection output is (topk, 6).
        if return_heatmap:
            assert (y.shape[1] == classes + 4) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)
        else:
            assert (y.shape[1] == topk) and (y.shape[2] == 6)
if __name__ == "__main__":
    # Run the smoke tests when executed as a script.
    _test()
| 16,535
| 32.204819
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/xdensenet_cifar.py
|
"""
X-DenseNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['CIFARXDenseNet', 'xdensenet40_2_k24_bc_cifar10', 'xdensenet40_2_k24_bc_cifar100',
'xdensenet40_2_k24_bc_svhn', 'xdensenet40_2_k36_bc_cifar10', 'xdensenet40_2_k36_bc_cifar100',
'xdensenet40_2_k36_bc_svhn']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3
from .preresnet import PreResActivation
from .densenet import TransitionBlock
from .xdensenet import pre_xconv3x3_block, XDenseUnit
class XDenseSimpleUnit(nn.Module):
    """
    X-DenseNet simple (non-bottleneck) unit for CIFAR: one expander conv whose
    output is concatenated onto the input (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 expand_ratio):
        super(XDenseSimpleUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        # The conv only produces the growth channels; the input is concatenated on.
        growth_channels = out_channels - in_channels
        self.conv = pre_xconv3x3_block(
            in_channels=in_channels,
            out_channels=growth_channels,
            expand_ratio=expand_ratio)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        new_features = self.conv(x)
        if self.use_dropout:
            new_features = self.dropout(new_features)
        return torch.cat((x, new_features), dim=1)
class CIFARXDenseNet(nn.Module):
    """
    X-DenseNet model for CIFAR from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 expand_ratio=2,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARXDenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        unit_class = XDenseUnit if bottleneck else XDenseSimpleUnit
        # NOTE: submodule names ("init_block", "stageN", "unitN", ...) are kept as-is;
        # pretrained weight files key their parameters on these names.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            # Every stage except the first starts with a transition that halves
            # the channel count (and downsamples spatially).
            if i != 0:
                stage.add_module("trans{}".format(i + 1), TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                stage.add_module("unit{}".format(j + 1), unit_class(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    expand_ratio=expand_ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        # 8x8 average pooling: final feature map of a 32x32 input after two
        # transition downsamples.
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for conv weights, zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_xdensenet_cifar(num_classes,
                        blocks,
                        growth_rate,
                        bottleneck,
                        expand_ratio=2,
                        model_name=None,
                        pretrained=False,
                        root=os.path.join("~", ".torch", "models"),
                        **kwargs):
    """
    Create X-DenseNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    growth_rate : int
        Growth rate.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    expand_ratio : int, default 2
        Ratio of expansion.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])
    # Three stages; a bottleneck unit counts as two layers.
    if bottleneck:
        assert ((blocks - 4) % 6 == 0)
        layers = [(blocks - 4) // 6] * 3
    else:
        assert ((blocks - 4) % 3 == 0)
        layers = [(blocks - 4) // 3] * 3
    init_block_channels = 2 * growth_rate

    # Per-unit output widths: each stage starts from half the previous stage's
    # final width (the transition halves channels), and every dense unit then
    # adds `growth_rate` channels.
    channels = []
    prev_width = 2 * init_block_channels
    for stage_units in layers:
        base_width = prev_width // 2
        stage = [base_width + growth_rate * (k + 1) for k in range(stage_units)]
        channels.append(stage)
        prev_width = stage[-1]

    net = CIFARXDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        num_classes=num_classes,
        bottleneck=bottleneck,
        expand_ratio=expand_ratio,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def xdensenet40_2_k24_bc_cifar10(num_classes=10, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=24) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_cifar10",
        **kwargs)
def xdensenet40_2_k24_bc_cifar100(num_classes=100, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=24) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_cifar100",
        **kwargs)
def xdensenet40_2_k24_bc_svhn(num_classes=10, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=24) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_svhn",
        **kwargs)
def xdensenet40_2_k36_bc_cifar10(num_classes=10, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=36) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_cifar10",
        **kwargs)
def xdensenet40_2_k36_bc_cifar100(num_classes=100, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=36) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_cifar100",
        **kwargs)
def xdensenet40_2_k36_bc_svhn(num_classes=10, **kwargs):
    """
    X-DenseNet-BC-40-2 (k=36) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph
    Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_svhn",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke test: build every X-DenseNet variant, check parameter counts and output shape."""
    import torch
    pretrained = False
    models = [
        (xdensenet40_2_k24_bc_cifar10, 10),
        (xdensenet40_2_k24_bc_cifar100, 100),
        (xdensenet40_2_k24_bc_svhn, 10),
        (xdensenet40_2_k36_bc_cifar10, 10),
        (xdensenet40_2_k36_bc_cifar100, 100),
        (xdensenet40_2_k36_bc_svhn, 10),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts are per-model; the `!= ... or` form only
        # checks the count for the model actually under test.
        assert (model != xdensenet40_2_k24_bc_cifar10 or weight_count == 690346)
        assert (model != xdensenet40_2_k24_bc_cifar100 or weight_count == 714196)
        assert (model != xdensenet40_2_k24_bc_svhn or weight_count == 690346)
        assert (model != xdensenet40_2_k36_bc_cifar10 or weight_count == 1542682)
        assert (model != xdensenet40_2_k36_bc_cifar100 or weight_count == 1578412)
        assert (model != xdensenet40_2_k36_bc_svhn or weight_count == 1542682)
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
if __name__ == "__main__":
    # Run the smoke tests when executed as a script.
    _test()
| 12,852
| 33.831978
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/revnet.py
|
"""
RevNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'The Reversible Residual Network: Backpropagation Without Storing Activations,'
https://arxiv.org/abs/1707.04585.
"""
__all__ = ['RevNet', 'revnet38', 'revnet110', 'revnet164']
import os
from contextlib import contextmanager
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, pre_conv1x1_block, pre_conv3x3_block
def _supports_grad_mode_context():
    """
    Return True if this torch build (> 0.3) supports `torch.set_grad_enabled`
    as a context manager.

    Parses `torch.__version__` component-wise instead of indexing single
    characters, which would break on multi-digit components (e.g. "0.10.x").
    A pre-release build (an 'a' in the version string, e.g. "0.4.0a0") is
    treated as belonging to the previous release, matching the original check.
    """
    numeric = torch.__version__.split("+")[0]
    parts = []
    for chunk in numeric.split(".")[:2]:
        digits = ""
        for ch in chunk:
            if not ch.isdigit():
                break
            digits += ch
        parts.append(int(digits or 0))
    major, minor = (parts + [0, 0])[:2]
    if "a" in torch.__version__:
        minor -= 1
    return major * 100 + minor > 3


# Whether `torch.set_grad_enabled` can be used as a context manager.
use_context_mans = _supports_grad_mode_context()
@contextmanager
def set_grad_enabled(grad_mode):
    """
    Version-tolerant wrapper around `torch.set_grad_enabled`: a no-op on torch
    builds without context-manager support.
    """
    if use_context_mans:
        with torch.set_grad_enabled(grad_mode) as c:
            yield [c]
    else:
        yield
class ReversibleBlockFunction(torch.autograd.Function):
    """
    RevNet reversible block function: y1 = x1 + fm(x2), y2 = x2 + gm(y1).

    Activations are not stored for backward; instead the inputs are
    reconstructed from the outputs, trading compute for memory.
    """
    @staticmethod
    def forward(ctx, x, fm, gm, *params):
        with torch.no_grad():
            x1, x2 = torch.chunk(x, chunks=2, dim=1)
            x1 = x1.contiguous()
            x2 = x2.contiguous()
            y1 = x1 + fm(x2)
            y2 = x2 + gm(y1)
            y = torch.cat((y1, y2), dim=1)
            # Free the intermediate storages immediately -- the whole point of a
            # reversible block is not to keep activations alive.
            x1.set_()
            x2.set_()
            y1.set_()
            y2.set_()
            del x1, x2, y1, y2
        ctx.save_for_backward(x, y)
        ctx.fm = fm
        ctx.gm = gm
        return y
    @staticmethod
    def backward(ctx, grad_y):
        fm = ctx.fm
        gm = ctx.gm
        x, y = ctx.saved_variables
        y1, y2 = torch.chunk(y, chunks=2, dim=1)
        y1 = y1.contiguous()
        y2 = y2.contiguous()
        # Reconstruct the inputs by inverting the coupling (no graph needed).
        with torch.no_grad():
            y1_z = Variable(y1.data, requires_grad=True)
            x2 = y2 - gm(y1_z)
            x1 = y1 - fm(x2)
        # Re-run the forward pass with grad enabled to build a local graph and
        # obtain gradients w.r.t. inputs and both sub-modules' parameters.
        with set_grad_enabled(True):
            x1_ = Variable(x1.data, requires_grad=True)
            x2_ = Variable(x2.data, requires_grad=True)
            y1_ = x1_ + fm.forward(x2_)
            y2_ = x2_ + gm(y1_)
            y = torch.cat((y1_, y2_), dim=1)
            dd = torch.autograd.grad(y, (x1_, x2_) + tuple(gm.parameters()) + tuple(fm.parameters()), grad_y)
            # dd layout: (dx1, dx2, gm grads..., fm grads...).
            gm_params_len = len([p for p in gm.parameters()])
            gm_params_grads = dd[2:2 + gm_params_len]
            fm_params_grads = dd[2 + gm_params_len:]
            grad_x = torch.cat((dd[0], dd[1]), dim=1)
            y1_.detach_()
            y2_.detach_()
            del y1_, y2_
        # Restore the (previously freed) input storage for upstream blocks.
        x.data.set_(torch.cat((x1, x2), dim=1).data.contiguous())
        # None, None match the non-tensor fm/gm arguments of forward().
        return (grad_x, None, None) + fm_params_grads + gm_params_grads
class ReversibleBlock(nn.Module):
    """
    RevNet reversible block: wraps ReversibleBlockFunction and frees the input
    activation storage after the call (it is reconstructed during backward).

    Parameters:
    ----------
    fm : nn.Module
        Fm-function.
    gm : nn.Module
        Gm-function.
    """
    def __init__(self,
                 fm,
                 gm):
        super(ReversibleBlock, self).__init__()
        self.gm = gm
        self.fm = fm
        self.rev_funct = ReversibleBlockFunction.apply
    def forward(self, x):
        # The channel dimension is split in half for the coupling.
        assert (x.shape[1] % 2 == 0)
        # Parameters are passed explicitly so autograd tracks their gradients.
        params = [w for w in self.fm.parameters()] + [w for w in self.gm.parameters()]
        y = self.rev_funct(x, self.fm, self.gm, *params)
        # Free the input storage; backward reconstructs and restores it.
        x.data.set_()
        return y
    def inverse(self, y):
        """Recover the block input from its output (exact inverse of forward)."""
        assert (y.shape[1] % 2 == 0)
        y1, y2 = torch.chunk(y, chunks=2, dim=1)
        y1 = y1.contiguous()
        y2 = y2.contiguous()
        x2 = y2 - self.gm(y1)
        x1 = y1 - self.fm(x2)
        x = torch.cat((x1, x2), dim=1)
        return x
class RevResBlock(nn.Module):
    """
    Simple RevNet block for residual path in RevNet unit: two 3x3 convolutions,
    the first optionally pre-activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    preactivate : bool
        Whether use pre-activation for the first convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 preactivate):
        super(RevResBlock, self).__init__()
        first_conv = pre_conv3x3_block if preactivate else conv3x3
        self.conv1 = first_conv(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        self.conv2 = pre_conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class RevResBottleneck(nn.Module):
    """
    RevNet bottleneck block for residual path in RevNet unit: 1x1 reduce,
    3x3 (strided), 1x1 expand; the first conv optionally pre-activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    preactivate : bool
        Whether use pre-activation for the first convolution block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 preactivate,
                 bottleneck_factor=4):
        super(RevResBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        first_conv = pre_conv1x1_block if preactivate else conv1x1
        self.conv1 = first_conv(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class RevUnit(nn.Module):
    """
    RevNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    preactivate : bool
        Whether use pre-activation for the first convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 preactivate):
        super(RevUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        body_class = RevResBottleneck if bottleneck else RevResBlock
        # A memory-saving reversible block is only possible when shape is
        # preserved; otherwise fall back to a plain residual body.
        if (not self.resize_identity) and (stride == 1):
            assert (in_channels % 2 == 0)
            assert (out_channels % 2 == 0)
            in_channels2 = in_channels // 2
            out_channels2 = out_channels // 2
            gm = body_class(
                in_channels=in_channels2,
                out_channels=out_channels2,
                stride=1,
                preactivate=preactivate)
            fm = body_class(
                in_channels=in_channels2,
                out_channels=out_channels2,
                stride=1,
                preactivate=preactivate)
            # NOTE(review): ReversibleBlock's signature is (fm, gm), so this call
            # swaps the two. The sub-modules are built identically here, so the
            # swap is functionally harmless, but pretrained weight keys depend on
            # the current mapping -- do not "fix" without re-mapping weights.
            self.body = ReversibleBlock(gm, fm)
        else:
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                preactivate=preactivate)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
            x = self.body(x)
            x = x + identity
        else:
            # Reversible path: the coupling already contains the additive
            # identity, so no explicit skip connection is applied.
            x = self.body(x)
        return x
class RevPostActivation(nn.Module):
    """
    RevNet specific post-activation block: batch normalization followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(RevPostActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class RevNet(nn.Module):
    """
    RevNet model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,'
    https://arxiv.org/abs/1707.04585.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(RevNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stageN", "unitN", ...) are kept
        # as-is; pretrained weight files key their parameters on these names.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                preactivate = (j != 0) or (i != 0)
                stage.add_module("unit{}".format(j + 1), RevUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck,
                    preactivate=preactivate))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_postactiv", RevPostActivation(in_channels=in_channels))
        # 56x56 average pooling: final feature map size for a 224x224 input
        # (two stride-2 stages).
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=56,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for conv weights, zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_revnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create RevNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # blocks -> (units per stage, channel widths per stage, use bottleneck)
    configs = {
        38: ([3, 3, 3], [32, 64, 112], False),
        110: ([9, 9, 9], [32, 64, 128], False),
        164: ([9, 9, 9], [128, 256, 512], True),
    }
    if blocks not in configs:
        raise ValueError("Unsupported RevNet with number of blocks: {}".format(blocks))
    layers, channels_per_layers, bottleneck = configs[blocks]
    init_block_channels = 32
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = RevNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def revnet38(**kwargs):
    """
    RevNet-38 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,'
    https://arxiv.org/abs/1707.04585.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_revnet(
        blocks=38,
        model_name="revnet38",
        **kwargs)
def revnet110(**kwargs):
    """
    RevNet-110 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,'
    https://arxiv.org/abs/1707.04585.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_revnet(
        blocks=110,
        model_name="revnet110",
        **kwargs)
def revnet164(**kwargs):
    """
    RevNet-164 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,'
    https://arxiv.org/abs/1707.04585.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_revnet(
        blocks=164,
        model_name="revnet164",
        **kwargs)
def _calc_width(net):
    """Return the total number of trainable parameters of `net`."""
    import numpy as np
    return sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
def _test():
    """Smoke test: build every RevNet variant, check parameter counts and output shape."""
    import torch
    pretrained = False
    models = [
        revnet38,
        revnet110,
        revnet164,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts are per-model; the `!= ... or` form only
        # checks the count for the model actually under test.
        assert (model != revnet38 or weight_count == 685864)
        assert (model != revnet110 or weight_count == 1982600)
        assert (model != revnet164 or weight_count == 2491656)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
    # Run the smoke tests when executed as a script.
    _test()
| 15,590
| 28.142056
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/ntsnet_cub.py
|
"""
NTS-Net for CUB-200-2011, implemented in PyTorch.
Original paper: 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287.
"""
__all__ = ['NTSNet', 'ntsnet_cub']
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .common import conv1x1, conv3x3, Flatten
from .resnet import resnet50b
def hard_nms(cdds,
             top_n=10,
             iou_thresh=0.25):
    """
    Hard Non-Maximum Suppression.

    Rows are (score, y0, x0, y1, x1, ...). The highest-scoring box is kept,
    every remaining box overlapping it with IoU >= `iou_thresh` is discarded,
    and the process repeats until `top_n` boxes are kept or none remain.

    Parameters:
    ----------
    cdds : np.array
        Borders.
    top_n : int, default 10
        Number of top-K informative regions.
    iou_thresh : float, default 0.25
        IoU threshold.

    Returns:
    -------
    np.array
        Filtered borders.
    """
    assert (type(cdds) == np.ndarray)
    assert (cdds.ndim == 2)
    assert (cdds.shape[1] >= 5)

    # Sort ascending by score so the best candidate is always the last row.
    remaining = cdds.copy()[np.argsort(cdds[:, 0])]
    kept = []
    while remaining.any():
        best = remaining[-1]
        kept.append(best)
        if len(kept) == top_n:
            break
        remaining = remaining[:-1]
        # Intersection rectangle of `best` with every remaining candidate.
        overlap_tl = np.maximum(remaining[:, 1:3], best[1:3])
        overlap_br = np.minimum(remaining[:, 3:5], best[3:5])
        sides = overlap_br - overlap_tl
        inter = np.where((sides[:, 0] < 0) | (sides[:, 1] < 0), 0, sides[:, 0] * sides[:, 1])
        area_rest = (remaining[:, 3] - remaining[:, 1]) * (remaining[:, 4] - remaining[:, 2])
        area_best = (best[3] - best[1]) * (best[4] - best[2])
        iou = inter / (area_rest + area_best - inter)
        remaining = remaining[iou < iou_thresh]
    return np.array(kept)
class NavigatorBranch(nn.Module):
    """
    Navigator branch block for Navigator unit: a strided 3x3 reduction followed
    by a 1x1 scoring conv; returns both flattened scores and the intermediate
    feature map (fed to the next branch).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(NavigatorBranch, self).__init__()
        mid_channels = 128
        self.down_conv = conv3x3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride,
            bias=True)
        self.activ = nn.ReLU(inplace=False)
        self.tidy_conv = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)
        self.flatten = Flatten()

    def forward(self, x):
        mid = self.activ(self.down_conv(x))
        scores = self.flatten(self.tidy_conv(mid))
        return scores, mid
class NavigatorUnit(nn.Module):
    """
    Navigator unit: three cascaded score branches over progressively
    downsampled feature maps, concatenated into one anchor-score vector.
    """
    def __init__(self):
        super(NavigatorUnit, self).__init__()
        self.branch1 = NavigatorBranch(
            in_channels=2048,
            out_channels=6,
            stride=1)
        self.branch2 = NavigatorBranch(
            in_channels=128,
            out_channels=6,
            stride=2)
        self.branch3 = NavigatorBranch(
            in_channels=128,
            out_channels=9,
            stride=2)

    def forward(self, x):
        score1, x = self.branch1(x)
        score2, x = self.branch2(x)
        score3, _ = self.branch3(x)
        return torch.cat((score1, score2, score3), dim=1)
class NTSNet(nn.Module):
    """
    NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    aux : bool, default False
        Whether to output auxiliary results.
    top_n : int, default 4
        Number of extra top-K informative regions.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (448, 448)
        Spatial size of the expected input image.
    num_classes : int, default 200
        Number of classification classes.
    """
    def __init__(self,
                 backbone,
                 aux=False,
                 top_n=4,
                 in_channels=3,
                 in_size=(448, 448),
                 num_classes=200):
        super(NTSNet, self).__init__()
        assert (in_channels > 0)
        self.in_size = in_size
        self.num_classes = num_classes
        pad_side = 224
        pad_width = (pad_side, pad_side, pad_side, pad_side)
        self.top_n = top_n
        self.aux = aux
        self.num_cat = 4

        _, edge_anchors, _ = self._generate_default_anchor_maps()
        # Shift anchors into the coordinate frame of the zero-padded image.
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the same dtype.
        self.edge_anchors = (edge_anchors + 224).astype(int)
        # Append each anchor's index as an extra column so it survives NMS filtering.
        self.edge_anchors = np.concatenate(
            (self.edge_anchors.copy(), np.arange(0, len(self.edge_anchors)).reshape(-1, 1)), axis=1)

        self.backbone = backbone

        self.backbone_tail = nn.Sequential()
        self.backbone_tail.add_module("final_pool", nn.AdaptiveAvgPool2d(1))
        self.backbone_tail.add_module("flatten", Flatten())
        self.backbone_tail.add_module("dropout", nn.Dropout(p=0.5))
        self.backbone_classifier = nn.Linear(
            in_features=(512 * 4),
            out_features=num_classes)

        self.pad = nn.ZeroPad2d(padding=pad_width)
        self.navigator_unit = NavigatorUnit()
        self.concat_net = nn.Linear(
            in_features=(2048 * (self.num_cat + 1)),
            out_features=num_classes)

        if self.aux:
            self.partcls_net = nn.Linear(
                in_features=(512 * 4),
                out_features=num_classes)

        self._init_params()

    def _init_params(self):
        """
        Initialize convolution weights (Kaiming uniform) and zero their biases.
        """
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        raw_pre_features = self.backbone(x)

        # Score every anchor, then run hard-NMS per sample and keep top-N regions.
        rpn_score = self.navigator_unit(raw_pre_features)
        all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1)
                    for y in rpn_score.detach().cpu().numpy()]
        top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds]
        top_n_cdds = np.array(top_n_cdds)
        top_n_index = top_n_cdds[:, :, -1].astype(np.int64)
        top_n_index = torch.from_numpy(top_n_index).long().to(x.device)
        top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index)

        # Crop each selected region from the padded input and resize it to 224x224.
        batch = x.size(0)
        part_imgs = torch.zeros(batch, self.top_n, 3, 224, 224, dtype=x.dtype, device=x.device)
        x_pad = self.pad(x)
        for i in range(batch):
            for j in range(self.top_n):
                y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64))
                part_imgs[i:i + 1, j] = F.interpolate(
                    input=x_pad[i:i + 1, :, y0:y1, x0:x1],
                    size=(224, 224),
                    mode="bilinear",
                    align_corners=True)
        part_imgs = part_imgs.view(batch * self.top_n, 3, 224, 224)
        # Crops are detached: navigator gradients flow only through `top_n_prob`.
        part_features = self.backbone_tail(self.backbone(part_imgs.detach()))

        part_feature = part_features.view(batch, self.top_n, -1)
        part_feature = part_feature[:, :self.num_cat, :].contiguous()
        part_feature = part_feature.view(batch, -1)

        raw_features = self.backbone_tail(raw_pre_features.detach())

        # Final classifier sees part features concatenated with whole-image features.
        concat_out = torch.cat((part_feature, raw_features), dim=1)
        concat_logits = self.concat_net(concat_out)
        if self.aux:
            raw_logits = self.backbone_classifier(raw_features)
            part_logits = self.partcls_net(part_features).view(batch, self.top_n, -1)
            return concat_logits, raw_logits, part_logits, top_n_prob
        else:
            return concat_logits

    @staticmethod
    def _generate_default_anchor_maps(input_shape=(448, 448)):
        """
        Generate default anchor maps.

        Parameters:
        ----------
        input_shape : tuple of 2 int
            Input image size.

        Returns:
        -------
        center_anchors : np.array
            anchors * 4 (oy, ox, h, w).
        edge_anchors : np.array
            anchors * 4 (y0, x0, y1, x1).
        anchor_area : np.array
            anchors * 1 (area).
        """
        anchor_scale = [2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
        anchor_aspect_ratio = [0.667, 1, 1.5]
        anchors_setting = (
            dict(layer="p3", stride=32, size=48, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio),
            dict(layer="p4", stride=64, size=96, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio),
            dict(layer="p5", stride=128, size=192, scale=[1, anchor_scale[0], anchor_scale[1]],
                 aspect_ratio=anchor_aspect_ratio),
        )

        center_anchors = np.zeros((0, 4), dtype=np.float32)
        edge_anchors = np.zeros((0, 4), dtype=np.float32)
        anchor_areas = np.zeros((0,), dtype=np.float32)
        input_shape = np.array(input_shape, dtype=int)

        for anchor_info in anchors_setting:
            stride = anchor_info["stride"]
            size = anchor_info["size"]
            scales = anchor_info["scale"]
            aspect_ratios = anchor_info["aspect_ratio"]

            output_map_shape = np.ceil(input_shape.astype(np.float32) / stride)
            # `np.int` was removed in NumPy 1.24; use the builtin `int` instead.
            output_map_shape = output_map_shape.astype(int)
            output_shape = tuple(output_map_shape) + (4, )
            ostart = stride / 2.0
            oy = np.arange(ostart, ostart + stride * output_shape[0], stride)
            oy = oy.reshape(output_shape[0], 1)
            ox = np.arange(ostart, ostart + stride * output_shape[1], stride)
            ox = ox.reshape(1, output_shape[1])
            center_anchor_map_template = np.zeros(output_shape, dtype=np.float32)
            center_anchor_map_template[:, :, 0] = oy
            center_anchor_map_template[:, :, 1] = ox
            for anchor_scale in scales:
                for anchor_aspect_ratio in aspect_ratios:
                    center_anchor_map = center_anchor_map_template.copy()
                    # Height/width for this (scale, aspect-ratio) pair.
                    center_anchor_map[:, :, 2] = size * anchor_scale / float(anchor_aspect_ratio) ** 0.5
                    center_anchor_map[:, :, 3] = size * anchor_scale * float(anchor_aspect_ratio) ** 0.5

                    edge_anchor_map = np.concatenate(
                        (center_anchor_map[:, :, :2] - center_anchor_map[:, :, 2:4] / 2.0,
                         center_anchor_map[:, :, :2] + center_anchor_map[:, :, 2:4] / 2.0),
                        axis=-1)
                    anchor_area_map = center_anchor_map[:, :, 2] * center_anchor_map[:, :, 3]
                    center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4)))
                    edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4)))
                    anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1)))

        return center_anchors, edge_anchors, anchor_areas
def get_ntsnet(backbone,
               aux=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create NTS-Net model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    aux : bool, default False
        Whether to output auxiliary results.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = NTSNet(backbone=backbone, aux=aux, **kwargs)

    if pretrained:
        # An empty or missing name cannot address a pretrained checkpoint.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def ntsnet_cub(pretrained_backbone=False, aux=True, **kwargs):
    """
    NTS-Net model for CUB-200-2011 from 'Learning to Navigate for Fine-grained Classification,'
    https://arxiv.org/abs/1809.00287.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone = resnet50b(pretrained=pretrained_backbone).features
    # Drop the backbone's final pooling stage: NTS-Net pools in `backbone_tail`.
    del backbone[-1]
    return get_ntsnet(backbone=backbone, aux=aux, model_name="ntsnet_cub", **kwargs)
def _calc_width(net):
    # Count the total number of trainable parameters in the network.
    import numpy as np
    total = 0
    for param in net.parameters():
        if param.requires_grad:
            total += np.prod(param.size())
    return total
def _test():
    # Smoke test: build the model, check its parameter count, and run one
    # forward/backward pass on a random batch.
    import torch

    pretrained = False
    aux = True

    for model in [ntsnet_cub]:
        net = model(pretrained=pretrained, aux=aux)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        expected = 29033133 if aux else 28623333
        assert (model != ntsnet_cub or weight_count == expected)

        x = torch.randn(5, 3, 448, 448)
        ys = net(x)
        y = ys[0] if aux else ys
        y.sum().backward()
        assert (tuple(y.size()) == (5, 200))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 14,019
| 32.54067
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/proxylessnas_cub.py
|
"""
ProxylessNAS for CUB-200-2011, implemented in Gluon.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub']
from .proxylessnas import get_proxylessnas
def proxylessnas_cpu_cub(num_classes=200, **kwargs):
    """
    ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        num_classes=num_classes,
        version="cpu",
        model_name="proxylessnas_cpu_cub",
        **kwargs)
def proxylessnas_gpu_cub(num_classes=200, **kwargs):
    """
    ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        num_classes=num_classes,
        version="gpu",
        model_name="proxylessnas_gpu_cub",
        **kwargs)
def proxylessnas_mobile_cub(num_classes=200, **kwargs):
    """
    ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task
    and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        num_classes=num_classes,
        version="mobile",
        model_name="proxylessnas_mobile_cub",
        **kwargs)
def proxylessnas_mobile14_cub(num_classes=200, **kwargs):
    """
    ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task
    and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        num_classes=num_classes,
        version="mobile14",
        model_name="proxylessnas_mobile14_cub",
        **kwargs)
def _calc_width(net):
    # Count the total number of trainable parameters in the network.
    import numpy as np
    total = 0
    for param in net.parameters():
        if param.requires_grad:
            total += np.prod(param.size())
    return total
def _test():
    # Smoke test: build each variant, check its parameter count, and run one
    # forward/backward pass on a random batch.
    import torch

    pretrained = False

    expected_counts = {
        proxylessnas_cpu_cub: 3215248,
        proxylessnas_gpu_cub: 5736648,
        proxylessnas_mobile_cub: 3055712,
        proxylessnas_mobile14_cub: 5423168,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(14, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (14, 200))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 4,155
| 32.788618
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/ibnresnet.py
|
"""
IBN-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNResNet', 'ibn_resnet50', 'ibn_resnet101', 'ibn_resnet152']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, IBN
from .resnet import ResInitBlock
class IBNConvBlock(nn.Module):
    """
    IBN-Net specific convolution block with BN/IBN normalization and ReLU activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_ibn=False,
                 activate=True):
        super(IBNConvBlock, self).__init__()
        self.activate = activate
        self.use_ibn = use_ibn

        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # Normalization: IBN (mixed instance/batch) or plain batch norm.
        if self.use_ibn:
            self.ibn = IBN(channels=out_channels)
        else:
            self.bn = nn.BatchNorm2d(num_features=out_channels)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.ibn(out) if self.use_ibn else self.bn(out)
        return self.activ(out) if self.activate else out
def ibn_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      groups=1,
                      bias=False,
                      use_ibn=False,
                      activate=True):
    """
    1x1 version of the IBN-Net specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    activate : bool, default True
        Whether activate the convolution block.
    """
    # Delegate to the generic block with a fixed 1x1 kernel and no padding.
    return IBNConvBlock(
        in_channels, out_channels, 1, stride, 0,
        groups=groups,
        bias=bias,
        use_ibn=use_ibn,
        activate=activate)
class IBNResBottleneck(nn.Module):
    """
    IBN-ResNet bottleneck block for residual path in IBN-ResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 conv1_ibn):
        super(IBNResBottleneck, self).__init__()
        # Classic 1x1 -> 3x3 -> 1x1 bottleneck with a 4x internal reduction.
        mid_channels = out_channels // 4

        self.conv1 = ibn_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_ibn=conv1_ibn)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None)

    def forward(self, x):
        # Apply the three convolution stages sequentially.
        return self.conv3(self.conv2(self.conv1(x)))
class IBNResUnit(nn.Module):
    """
    IBN-ResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 conv1_ibn):
        super(IBNResUnit, self).__init__()
        # The identity branch needs a projection whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.body = IBNResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            conv1_ibn=conv1_ibn)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class IBNResNet(nn.Module):
    """
    IBN-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(IBNResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                # IBN is applied everywhere except the widest (2048-ch) units.
                conv1_ibn = (out_channels < 2048)
                stage.add_module("unit{}".format(j + 1), IBNResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    conv1_ibn=conv1_ibn))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform init for convolution weights, zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_ibnresnet(blocks,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create IBN-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported IBN-ResNet with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = IBNResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def ibn_resnet50(**kwargs):
    """
    IBN-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=50,
        model_name="ibn_resnet50",
        **kwargs)
def ibn_resnet101(**kwargs):
    """
    IBN-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=101,
        model_name="ibn_resnet101",
        **kwargs)
def ibn_resnet152(**kwargs):
    """
    IBN-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=152,
        model_name="ibn_resnet152",
        **kwargs)
def _calc_width(net):
    # Count the total number of trainable parameters in the network.
    import numpy as np
    total = 0
    for param in net.parameters():
        if param.requires_grad:
            total += np.prod(param.size())
    return total
def _test():
    # Smoke test: build each variant, check its parameter count, and run one
    # forward/backward pass on a random batch.
    import torch

    pretrained = False

    expected_counts = {
        ibn_resnet50: 25557032,
        ibn_resnet101: 44549160,
        ibn_resnet152: 60192808,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 12,570
| 29.002387
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/common.py
|
"""
Common routines for models in PyTorch.
"""
__all__ = ['round_channels', 'Identity', 'BreakBlock', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer',
'SelectableDense', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock',
'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block',
'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block',
'AsymConvBlock', 'asym_conv3x3_block', 'DeconvBlock', 'deconv3x3_block', 'NormActivation',
'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock',
'saconv3x3_block', 'DucBlock', 'IBN', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent',
'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'Flatten', 'HeatmapMaxDetBlock']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns:
    -------
    int
        Weighted number of channels.
    """
    rounded = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    # Never round down by more than 10% of the requested width.
    if rounded < 0.9 * channels:
        rounded += divisor
    return rounded
class Identity(nn.Module):
    """
    Identity block: returns its input unchanged.
    """
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # Pass-through.
        return x

    def __repr__(self):
        return self.__class__.__name__ + "()"
class BreakBlock(nn.Module):
    """
    Break connection block for hourglass: swallows its input and yields None.
    """
    def __init__(self):
        super(BreakBlock, self).__init__()

    def forward(self, x):
        # Deliberately discard the input to break the skip connection.
        return None

    def __repr__(self):
        return self.__class__.__name__ + "()"
class Swish(nn.Module):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
    """
    def forward(self, x):
        # swish(x) = x * sigmoid(x)
        return torch.sigmoid(x) * x
class HSigmoid(nn.Module):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    """
    def forward(self, x):
        # relu6(x + 3) / 6, written via clamp (identical values and gradients).
        return torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
class HSwish(nn.Module):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    inplace : bool
        Whether to use inplace version of the module.
    """
    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        # hswish(x) = x * relu6(x + 3) / 6
        gate = F.relu6(x + 3.0, inplace=self.inplace)
        return x * gate / 6.0
def get_activation_layer(activation):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    activation : function, or str, or nn.Module
        Activation function or name of activation function.

    Returns:
    -------
    nn.Module
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        # A factory function: call it to produce the layer.
        return activation()
    if isinstance(activation, str):
        # Resolve well-known names; custom classes are instantiated lazily so a
        # missing one is only an error when actually requested.
        if activation == "relu":
            return nn.ReLU(inplace=True)
        if activation == "relu6":
            return nn.ReLU6(inplace=True)
        if activation == "swish":
            return Swish()
        if activation == "hswish":
            return HSwish(inplace=True)
        if activation == "sigmoid":
            return nn.Sigmoid()
        if activation == "hsigmoid":
            return HSigmoid()
        if activation == "identity":
            return Identity()
        raise NotImplementedError()
    # Already-constructed module: return as-is.
    assert (isinstance(activation, nn.Module))
    return activation
class SelectableDense(nn.Module):
    """
    Selectable dense layer: a bank of `num_options` weight matrices, one of
    which is picked per sample via an index tensor.

    Parameters:
    ----------
    in_features : int
        Number of input features.
    out_features : int
        Number of output features.
    bias : bool, default False
        Whether the layer uses a bias vector.
    num_options : int, default 1
        Number of selectable options.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=False,
                 num_options=1):
        super(SelectableDense, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.use_bias = bias
        self.num_options = num_options
        # One (out_features x in_features) matrix per option.
        self.weight = Parameter(torch.Tensor(num_options, out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(num_options, out_features))
        else:
            self.register_parameter("bias", None)

    def forward(self, x, indices):
        # Gather each sample's weight matrix, then batch-multiply.
        weight = torch.index_select(self.weight, dim=0, index=indices)
        x = weight.bmm(x.unsqueeze(-1)).squeeze(dim=-1)
        if self.use_bias:
            x = x + torch.index_select(self.bias, dim=0, index=indices)
        return x

    def extra_repr(self):
        return "in_features={}, out_features={}, bias={}, num_options={}".format(
            self.in_features, self.out_features, self.use_bias, self.num_options)
class DenseBlock(nn.Module):
    """
    Standard dense block with Batch normalization and activation.

    Parameters:
    ----------
    in_features : int
        Number of input features.
    out_features : int
        Number of output features.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(DenseBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn

        self.fc = nn.Linear(
            in_features=in_features,
            out_features=out_features,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm1d(
                num_features=out_features,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.fc(x)
        if self.use_bn:
            out = self.bn(out)
        return self.activ(out) if self.activate else out
class ConvBlock1d(nn.Module):
    """
    Standard 1D convolution block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    stride : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock1d, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn

        self.conv = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm1d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        return self.activ(out) if self.activate else out
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    Convolution 1x1 layer (pointwise convolution).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(
        in_channels, out_channels,
        kernel_size=1,
        stride=stride,
        groups=groups,
        bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """
    Convolution 3x3 layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(
        in_channels, out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
def depthwise_conv3x3(channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      bias=False):
    """
    Depthwise convolution 3x3 layer (one group per channel).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(
        channels, channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=channels,
        bias=bias)
class ConvBlock(nn.Module):
    """
    Standard convolution block: convolution, then optional batch normalization, then optional
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int
        Padding value for the convolution layer. A 4-element tuple/list selects explicit
        (possibly asymmetric) zero padding via nn.ZeroPad2d instead of the convolution's own.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the convolution uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        self.activate = activation is not None
        self.use_bn = use_bn
        # A 4-element padding is asymmetric and must be applied by a dedicated pad layer.
        self.use_pad = isinstance(padding, (list, tuple)) and (len(padding) == 4)
        if self.use_pad:
            self.pad = nn.ZeroPad2d(padding=padding)
            padding = 0
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        if self.use_pad:
            x = self.pad(x)
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    Pointwise (1x1) variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0
        Padding value for the convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels,
        out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    7x7 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 3
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise variant of the standard convolution block (groups == out_channels).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return dwconv_block(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 depthwise variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return dwconv_block(
        in_channels,
        out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
class DwsConvBlock(nn.Module):
    """
    Depthwise separable convolution block: a depthwise convolution followed by a pointwise
    convolution, each with its own BatchNorm and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the depthwise stage.
    stride : int or tuple/list of 2 int
        Stride of the depthwise convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int
        Padding value for the depthwise convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the depthwise convolution layer.
    bias : bool, default False
        Whether both convolutions use a bias vector.
    dw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the depthwise stage.
    pw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the pointwise stage.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the depthwise stage.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the pointwise stage.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage keeps the channel count; spatial mixing only.
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=dw_use_bn,
            bn_eps=bn_eps,
            activation=dw_activation)
        # Pointwise stage mixes channels.
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=pw_use_bn,
            bn_eps=bn_eps,
            activation=pw_activation)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True)),
                     **kwargs):
    """
    3x3 depthwise separable variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the depthwise convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
        Padding value for the depthwise convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the depthwise convolution layer.
    bias : bool, default False
        Whether the layers use a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the depthwise stage.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the pointwise stage.
    """
    return DwsConvBlock(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation,
        **kwargs)
class PreConvBlock(nn.Module):
    """
    Convolution block with BatchNorm/ReLU pre-activation (BN -> ReLU -> conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor (used by PreResNet identity paths).
    activate : bool, default True
        Whether to apply the ReLU pre-activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.use_bn = use_bn
        if use_bn:
            self.bn = nn.BatchNorm2d(num_features=in_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)

    def forward(self, x):
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        # Capture the pre-activation output before the convolution, if requested.
        x_pre_activ = x if self.return_preact else None
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True):
    """
    Pointwise (1x1) variant of the pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    activate : bool, default True
        Whether to apply the ReLU pre-activation.
    """
    return PreConvBlock(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        use_bn=use_bn,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True):
    """
    3x3 variant of the pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    activate : bool, default True
        Whether to apply the ReLU pre-activation.
    """
    return PreConvBlock(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        use_bn=use_bn,
        return_preact=return_preact,
        activate=activate)
class AsymConvBlock(nn.Module):
    """
    Asymmetric separable convolution block: a (k x 1) convolution followed by a (1 x k) one.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size (applied along one axis per stage).
    padding : int
        Padding value for the convolution layers.
    dilation : int, default 1
        Dilation value for the convolution layers.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layers use a bias vector.
    lw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the leftwise (k x 1) stage.
    rw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the rightwise (1 x k) stage.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    lw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the leftwise stage.
    rw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the rightwise stage.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 lw_use_bn=True,
                 rw_use_bn=True,
                 bn_eps=1e-5,
                 lw_activation=(lambda: nn.ReLU(inplace=True)),
                 rw_activation=(lambda: nn.ReLU(inplace=True))):
        super(AsymConvBlock, self).__init__()
        # Vertical (k x 1) stage.
        self.lw_conv = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(kernel_size, 1),
            stride=1,
            padding=(padding, 0),
            dilation=(dilation, 1),
            groups=groups,
            bias=bias,
            use_bn=lw_use_bn,
            bn_eps=bn_eps,
            activation=lw_activation)
        # Horizontal (1 x k) stage.
        self.rw_conv = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(1, kernel_size),
            stride=1,
            padding=(0, padding),
            dilation=(1, dilation),
            groups=groups,
            bias=bias,
            use_bn=rw_use_bn,
            bn_eps=bn_eps,
            activation=rw_activation)

    def forward(self, x):
        return self.rw_conv(self.lw_conv(x))
def asym_conv3x3_block(padding=1,
                       **kwargs):
    """
    3x3 asymmetric separable convolution block.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    padding : int, default 1
        Padding value for the convolution layers.
    dilation : int, default 1
        Dilation value for the convolution layers.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layers use a bias vector.
    lw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the leftwise stage.
    rw_use_bn : bool, default True
        Whether to use a BatchNorm layer in the rightwise stage.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    lw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the leftwise stage.
    rw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the rightwise stage.
    """
    return AsymConvBlock(kernel_size=3, padding=padding, **kwargs)
class DeconvBlock(nn.Module):
    """
    Deconvolution (transposed convolution) block with optional batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the deconvolution.
    padding : int or tuple/list of 2 int
        Padding value for the deconvolution layer.
    ext_padding : tuple/list of 4 int, default None
        Extra explicit zero padding applied before the deconvolution.
    out_padding : int or tuple/list of 2 int, default 0
        Output padding value for the deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the deconvolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 ext_padding=None,
                 out_padding=0,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(DeconvBlock, self).__init__()
        self.activate = activation is not None
        self.use_bn = use_bn
        self.use_pad = ext_padding is not None
        if self.use_pad:
            self.pad = nn.ZeroPad2d(padding=ext_padding)
        self.conv = nn.ConvTranspose2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            output_padding=out_padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        if self.use_pad:
            x = self.pad(x)
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def deconv3x3_block(padding=1,
                    out_padding=1,
                    **kwargs):
    """
    3x3 variant of the deconvolution block with batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the deconvolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the deconvolution layer.
    ext_padding : tuple/list of 4 int, default None
        Extra explicit zero padding applied before the deconvolution.
    out_padding : int or tuple/list of 2 int, default 1
        Output padding value for the deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the deconvolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    return DeconvBlock(kernel_size=3, padding=padding, out_padding=out_padding, **kwargs)
class NormActivation(nn.Module):
    """
    BatchNorm followed by an activation. Used standalone as the final block of PreResNet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(NormActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels, eps=bn_eps)
        self.activ = get_activation_layer(activation)

    def forward(self, x):
        return self.activ(self.bn(x))
class InterpolationBlock(nn.Module):
"""
Interpolation upsampling block.
Parameters:
----------
scale_factor : int
Multiplier for spatial size.
out_size : tuple of 2 int, default None
Spatial size of the output tensor for the bilinear interpolation operation.
mode : str, default 'bilinear'
Algorithm used for upsampling.
align_corners : bool, default True
Whether to align the corner pixels of the input and output tensors.
up : bool, default True
Whether to upsample or downsample.
"""
def __init__(self,
scale_factor,
out_size=None,
mode="bilinear",
align_corners=True,
up=True):
super(InterpolationBlock, self).__init__()
self.scale_factor = scale_factor
self.out_size = out_size
self.mode = mode
self.align_corners = align_corners
self.up = up
def forward(self, x, size=None):
if (self.mode == "bilinear") or (size is not None):
out_size = self.calc_out_size(x) if size is None else size
return F.interpolate(
input=x,
size=out_size,
mode=self.mode,
align_corners=self.align_corners)
else:
return F.interpolate(
input=x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
def calc_out_size(self, x):
if self.out_size is not None:
return self.out_size
if self.up:
return tuple(s * self.scale_factor for s in x.shape[2:])
else:
return tuple(s // self.scale_factor for s in x.shape[2:])
def __repr__(self):
s = '{name}(scale_factor={scale_factor}, out_size={out_size}, mode={mode}, align_corners={align_corners}, up={up})' # noqa
return s.format(
name=self.__class__.__name__,
scale_factor=self.scale_factor,
out_size=self.out_size,
mode=self.mode,
align_corners=self.align_corners,
up=self.up)
def calc_flops(self, x):
assert (x.shape[0] == 1)
if self.mode == "bilinear":
num_flops = 9 * x.numel()
else:
num_flops = 4 * x.numel()
num_macs = 0
return num_flops, num_macs
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups.

    Returns:
    -------
    Tensor
        Tensor with channels interleaved across groups.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    # Split channels into (groups, per_group), swap the two axes, and flatten back.
    y = x.view(batch, groups, per_group, height, width)
    y = torch.transpose(y, 1, 2).contiguous()
    return y.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """
    Channel shuffle layer. A module wrapper over the shuffle operation that stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by `groups`.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups != 0:
            raise ValueError("channels must be divisible by groups")
        self.groups = groups

    def forward(self, x):
        # Inlined channel shuffle: (groups, per_group) -> transpose -> flatten.
        batch, channels, height, width = x.size()
        y = x.view(batch, self.groups, channels // self.groups, height, width)
        y = torch.transpose(y, 1, 2).contiguous()
        return y.view(batch, channels, height, width)

    def __repr__(self):
        return "{name}(groups={groups})".format(
            name=self.__class__.__name__,
            groups=self.groups)
def channel_shuffle2(x,
                     groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083. Alternative version with the group axis second.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups.

    Returns:
    -------
    Tensor
        Tensor with channels interleaved across groups.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    # Split channels into (per_group, groups), swap the two axes, and flatten back.
    y = x.view(batch, per_group, groups, height, width)
    y = torch.transpose(y, 1, 2).contiguous()
    return y.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """
    Channel shuffle layer (alternative version with the group axis second). A module wrapper
    that stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by `groups`.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups != 0:
            raise ValueError("channels must be divisible by groups")
        self.groups = groups

    def forward(self, x):
        # Inlined alternative shuffle: (per_group, groups) -> transpose -> flatten.
        batch, channels, height, width = x.size()
        y = x.view(batch, channels // self.groups, self.groups, height, width)
        y = torch.transpose(y, 1, 2).contiguous()
        return y.view(batch, channels, height, width)
class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    mid_channels : int or None, default None
        Number of middle channels. Computed from `channels` and `reduction` when None.
    round_mid : bool, default False
        Whether to round the middle channel number (make divisible by 8).
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    mid_activation : function, or str, or nn.Module, default nn.ReLU(inplace=True)
        Activation function after the first convolution/FC layer.
    out_activation : function, or str, or nn.Module, default nn.Sigmoid()
        Gating function after the last convolution/FC layer.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 mid_channels=None,
                 round_mid=False,
                 use_conv=True,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        self.use_conv = use_conv
        if mid_channels is None:
            mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        # Squeeze: global average pooling to a 1x1 spatial map.
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        if use_conv:
            self.conv1 = conv1x1(
                in_channels=channels,
                out_channels=mid_channels,
                bias=True)
        else:
            self.fc1 = nn.Linear(
                in_features=channels,
                out_features=mid_channels)
        self.activ = get_activation_layer(mid_activation)
        if use_conv:
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=channels,
                bias=True)
        else:
            self.fc2 = nn.Linear(
                in_features=mid_channels,
                out_features=channels)
        self.sigmoid = get_activation_layer(out_activation)

    def forward(self, x):
        w = self.pool(x)
        if not self.use_conv:
            # FC path operates on flattened (batch, channels) vectors.
            w = w.view(x.size(0), -1)
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # Restore (batch, channels, 1, 1) for broadcasting over spatial dims.
            w = w.unsqueeze(2).unsqueeze(3)
        # Excitation: per-channel rescaling of the input.
        x = x * w
        return x
class SABlock(nn.Module):
    """
    Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Computes per-split attention weights over `radix` splits of the channel dimension and
    returns the attention-weighted sum of the splits.

    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    groups : int
        Number of channel groups (cardinality, without radix).
    radix : int
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to convolutional layers instead of fully-connected ones.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 out_channels,
                 groups,
                 radix,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 bn_eps=1e-5):
        super(SABlock, self).__init__()
        self.groups = groups
        self.radix = radix
        self.use_conv = use_conv
        # The block expects its input to carry radix-times the output channels.
        in_channels = out_channels * radix
        mid_channels = max(in_channels // reduction, min_channels)
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        if use_conv:
            self.conv1 = conv1x1(
                in_channels=out_channels,
                out_channels=mid_channels,
                bias=True)
        else:
            self.fc1 = nn.Linear(
                in_features=out_channels,
                out_features=mid_channels)
        self.bn = nn.BatchNorm2d(
            num_features=mid_channels,
            eps=bn_eps)
        self.activ = nn.ReLU(inplace=True)
        if use_conv:
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=in_channels,
                bias=True)
        else:
            self.fc2 = nn.Linear(
                in_features=mid_channels,
                out_features=in_channels)
        # Softmax over dim=1 normalizes attention across the radix splits
        # (after the transpose in forward).
        self.softmax = nn.Softmax(dim=1)
    def forward(self, x):
        batch, channels, height, width = x.size()
        # Split the channel axis into (radix, channels_per_split).
        x = x.view(batch, self.radix, channels // self.radix, height, width)
        # Gap statistics: sum over splits, then global average pool.
        w = x.sum(dim=1)
        w = self.pool(w)
        if not self.use_conv:
            w = w.view(x.size(0), -1)
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.bn(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        # Reshape to (batch, groups, radix, -1) and swap so softmax runs over radix.
        w = w.view(batch, self.groups, self.radix, -1)
        w = torch.transpose(w, 1, 2).contiguous()
        w = self.softmax(w)
        # Broadcast the attention weights over the spatial dimensions.
        w = w.view(batch, self.radix, -1, 1, 1)
        x = x * w
        # Attention-weighted sum over the radix splits.
        x = x.sum(dim=1)
        return x
class SAConvBlock(nn.Module):
    """
    Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955: a grouped convolution producing radix-times the output
    channels, followed by split attention.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    padding : int, or tuple/list of 2 int, or tuple/list of 4 int
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    groups : int, default 1
        Number of groups (cardinality, without radix).
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use a BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    radix : int, default 2
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True)),
                 radix=2,
                 reduction=4,
                 min_channels=32,
                 use_conv=True):
        super(SAConvBlock, self).__init__()
        # Convolution widens channels by `radix`; attention collapses them back.
        self.conv = ConvBlock(
            in_channels=in_channels,
            out_channels=(out_channels * radix),
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=(groups * radix),
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation)
        self.att = SABlock(
            out_channels=out_channels,
            groups=groups,
            radix=radix,
            reduction=reduction,
            min_channels=min_channels,
            use_conv=use_conv,
            bn_eps=bn_eps)

    def forward(self, x):
        return self.att(self.conv(x))
def saconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    **kwargs):
    """
    3x3 version of the Split-Attention convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.

    Returns:
    -------
    SAConvBlock
        The constructed block (any extra keyword arguments are forwarded to it).
    """
    block = SAConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        **kwargs)
    return block
class DucBlock(nn.Module):
    """
    Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,'
    https://arxiv.org/abs/1702.08502.

    A 3x3 convolution expands the channels by scale_factor^2, then PixelShuffle rearranges
    them into a spatially upsampled map.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor):
        super(DucBlock, self).__init__()
        # PixelShuffle consumes scale_factor^2 channels per output channel.
        mid_channels = out_channels * scale_factor * scale_factor
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.pix_shuffle = nn.PixelShuffle(upscale_factor=scale_factor)

    def forward(self, x):
        return self.pix_shuffle(self.conv(x))
class IBN(nn.Module):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    The channels are split into two contiguous parts; one part is instance-normalized and
    the other batch-normalized, then the parts are concatenated back.

    Parameters:
    ----------
    channels : int
        Number of channels.
    first_fraction : float, default 0.5
        Fraction of channels assigned to the first part.
    inst_first : bool, default True
        Whether instance normalization be on the first part of channels.
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        inst_channels = h1_channels if inst_first else h2_channels
        bn_channels = h2_channels if inst_first else h1_channels
        self.inst_norm = nn.InstanceNorm2d(
            num_features=inst_channels,
            affine=True)
        self.batch_norm = nn.BatchNorm2d(num_features=bn_channels)

    def forward(self, x):
        part1, part2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        # `.contiguous()` because torch.split returns views.
        if self.inst_first:
            y1 = self.inst_norm(part1.contiguous())
            y2 = self.batch_norm(part2.contiguous())
        else:
            y1 = self.batch_norm(part1.contiguous())
            y2 = self.inst_norm(part2.contiguous())
        return torch.cat((y1, y2), dim=1)
class DualPathSequential(nn.Sequential):
    """
    A sequential container for modules with dual inputs/outputs.
    Modules will be executed in the order they are added.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first modules with single input/output.
    last_ordinals : int, default 0
        Number of the final modules with single input/output.
    dual_path_scheme : function
        Scheme of dual path response for a module.
    dual_path_scheme_ordinal : function
        Scheme of dual path response for an ordinal module.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def forward(self, x1, x2=None):
        children = list(self._modules.values())
        count = len(children)
        for idx, module in enumerate(children):
            # Leading/trailing "ordinal" modules take a single input.
            is_ordinal = (idx < self.first_ordinals) or (idx >= count - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(module, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(nn.Sequential):
    """
    A container for concatenation of modules on the base of the sequential container.

    Every child module receives the same input; the branch outputs are merged by
    concatenation, stacking, or summation.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    merge_type : str, default None
        Type of branch merging.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 merge_type=None):
        super(Concurrent, self).__init__()
        assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"])
        self.axis = axis
        # Explicit merge_type wins; otherwise fall back to the legacy `stack` flag.
        self.merge_type = merge_type if merge_type is not None else ("stack" if stack else "cat")

    def forward(self, x):
        branch_outs = tuple(branch(x) for branch in self._modules.values())
        if self.merge_type == "stack":
            return torch.stack(branch_outs, dim=self.axis)
        if self.merge_type == "cat":
            return torch.cat(branch_outs, dim=self.axis)
        if self.merge_type == "sum":
            return torch.stack(branch_outs, dim=self.axis).sum(self.axis)
        raise NotImplementedError()
class SequentialConcurrent(nn.Sequential):
    """
    A sequential container with concatenated outputs.
    Modules will be executed in the order they are added.

    Each intermediate output (and optionally the input) is kept and merged at the end.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    cat_input : bool, default True
        Whether to concatenate input tensor.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def forward(self, x):
        collected = [x] if self.cat_input else []
        for module in self._modules.values():
            x = module(x)
            collected.append(x)
        merge = torch.stack if self.stack else torch.cat
        return merge(tuple(collected), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """
    A sequential container for modules with parameters.
    Modules will be executed in the order they are added.

    Extra keyword arguments given to forward() are forwarded to every child module.
    """
    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for module in self:
            x = module(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """
    A container for concatenation of modules with parameters.

    Extra keyword arguments given to forward() are forwarded to every branch; branch
    outputs are concatenated along `axis`.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outs = tuple(module(x, **kwargs) for module in self._modules.values())
        return torch.cat(branch_outs, dim=self.axis)
class Hourglass(nn.Module):
    """
    A hourglass module.

    Runs the input through a chain of down modules, then back up through up modules,
    merging each up output with the skip-processed output of the matching down level.

    Parameters:
    ----------
    down_seq : nn.Sequential
        Down modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip_seq : nn.Sequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs.
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        self.depth = len(down_seq)
        assert (merge_type in ["cat", "add"])
        assert (len(up_seq) == self.depth)
        # skip_seq may carry one extra module that is applied at the bottleneck.
        assert (len(skip_seq) in (self.depth, self.depth + 1))
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.extra_skip = (len(skip_seq) == self.depth + 1)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq

    def _merge(self, x, y):
        # Combine the up-path tensor with a skip tensor; no-op when y is None.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x

    def forward(self, x, **kwargs):
        y = None
        # down_outs[i] is the activation after i down modules (index 0 is the input).
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        for i in range(len(down_outs)):
            if i != 0:
                # Skip-process the down output of the matching level, then merge.
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                x = self._merge(x, y)
            if i != len(down_outs) - 1:
                if (i == 0) and self.extra_skip:
                    # Extra skip module applied once at the bottleneck.
                    skip_module = self.skip_seq[self.depth]
                    x = skip_module(x)
                # Up modules are applied deepest-first.
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """
    A sesquialteral hourglass block.

    A down pass, an up pass, and a second down pass, with skip connections recorded on
    the first two passes and merged on the last two (as in FishNet).

    Parameters:
    ----------
    down1_seq : nn.Sequential
        The first down modules as sequential.
    skip1_seq : nn.Sequential
        The first skip connection modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip2_seq : nn.Sequential
        The second skip connection modules as sequential.
    down2_seq : nn.Sequential
        The second down modules as sequential.
    merge_type : str, default 'cat'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        # Skip sequences carry one module per level plus one for the entry level.
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq

    def _merge(self, x, y):
        # Combine the main-path tensor with a skip tensor; no-op when y is None.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x

    def forward(self, x, **kwargs):
        # First down pass: record skip1 outputs per level (including the entry level).
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Continue from the deepest skip1 output.
        x = skip1_outs[self.depth]
        # Up pass: merge with skip1 outputs and record skip2 outputs.
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Second down pass: merge with skip2 outputs.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """
    A sequential container with multiple outputs.
    Modules will be executed in the order they are added.

    Children flagged with `do_output` contribute their output as a side output; children
    flagged with `do_output2` return a (main, side_outputs) tuple.

    Parameters:
    ----------
    multi_output : bool, default True
        Whether to return multiple output.
    dual_output : bool, default False
        Whether to return dual output.
    return_last : bool, default True
        Whether to forcibly return last value.
    """
    def __init__(self,
                 multi_output=True,
                 dual_output=False,
                 return_last=True):
        super(MultiOutputSequential, self).__init__()
        self.multi_output = multi_output
        self.dual_output = dual_output
        self.return_last = return_last

    def forward(self, x):
        side_outs = []
        for module in self._modules.values():
            x = module(x)
            if getattr(module, "do_output", False):
                side_outs.append(x)
            elif getattr(module, "do_output2", False):
                assert (type(x) == tuple)
                side_outs.extend(x[1])
                x = x[0]
        if self.multi_output:
            return [x] + side_outs if self.return_last else side_outs
        if self.dual_output:
            return x, side_outs
        return x
class ParallelConcurent(nn.Sequential):
    """
    A sequential container with multiple inputs and single/multiple outputs.
    Modules will be executed in the order they are added.

    The i-th child module is applied to the i-th element of the input sequence.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    merge_type : str, default 'list'
        Type of branch merging.
    """
    def __init__(self,
                 axis=1,
                 merge_type="list"):
        super(ParallelConcurent, self).__init__()
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def forward(self, x):
        branch_outs = [module(xi) for module, xi in zip(self._modules.values(), x)]
        if self.merge_type == "list":
            return branch_outs
        if self.merge_type == "stack":
            return torch.stack(tuple(branch_outs), dim=self.axis)
        if self.merge_type == "cat":
            return torch.cat(tuple(branch_outs), dim=self.axis)
        if self.merge_type == "sum":
            return torch.stack(tuple(branch_outs), dim=self.axis).sum(self.axis)
        raise NotImplementedError()
class DualPathParallelConcurent(nn.Sequential):
    """
    A sequential container with multiple dual-path inputs and single/multiple outputs.
    Modules will be executed in the order they are added.

    The i-th child receives the i-th elements of both input sequences and must return a
    pair; the two output streams are merged independently.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    merge_type : str, default 'list'
        Type of branch merging.
    """
    def __init__(self,
                 axis=1,
                 merge_type="list"):
        super(DualPathParallelConcurent, self).__init__()
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def forward(self, x1, x2):
        outs1 = []
        outs2 = []
        for module, x1i, x2i in zip(self._modules.values(), x1, x2):
            y1i, y2i = module(x1i, x2i)
            outs1.append(y1i)
            outs2.append(y2i)
        if self.merge_type == "list":
            return outs1, outs2
        if self.merge_type == "stack":
            merge = lambda ys: torch.stack(tuple(ys), dim=self.axis)
        elif self.merge_type == "cat":
            merge = lambda ys: torch.cat(tuple(ys), dim=self.axis)
        elif self.merge_type == "sum":
            merge = lambda ys: torch.stack(tuple(ys), dim=self.axis).sum(self.axis)
        else:
            raise NotImplementedError()
        return merge(outs1), merge(outs2)
class Flatten(nn.Module):
    """
    Simple flatten module: collapses all dimensions except the batch one.
    """
    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)
class HeatmapMaxDetBlock(nn.Module):
    """
    Heatmap maximum detector block (for human pose estimation task).

    For every heatmap channel it finds the argmax location, then refines it by a
    quarter-pixel shift toward the larger of the two neighbors on each axis.
    """
    def __init__(self):
        super(HeatmapMaxDetBlock, self).__init__()

    def forward(self, x):
        """
        Parameters:
        ----------
        x : Tensor
            Heatmaps of shape (batch, channels, height, width).

        Returns:
        -------
        Tensor
            Keypoints of shape (batch, channels, 3): x coordinate, y coordinate, score.
        """
        heatmap = x
        vector_dim = 2
        batch = heatmap.shape[0]
        channels = heatmap.shape[1]
        in_size = x.shape[2:]
        heatmap_vector = heatmap.view(batch, channels, -1)
        # Fix: use the canonical `keepdim` argument; the original passed the
        # numpy-style alias `keepdims`, which older PyTorch releases reject.
        scores, indices = heatmap_vector.max(dim=vector_dim, keepdim=True)
        # Zero out coordinates for channels with no positive response.
        scores_mask = (scores > 0.0).float()
        pts_x = (indices % in_size[1]) * scores_mask
        pts_y = (indices // in_size[1]) * scores_mask
        pts = torch.cat((pts_x, pts_y, scores), dim=vector_dim)
        # Quarter-pixel refinement toward the higher-valued neighbor (skipped on borders).
        for b in range(batch):
            for k in range(channels):
                hm = heatmap[b, k, :, :]
                px = int(pts[b, k, 0])
                py = int(pts[b, k, 1])
                if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
                    pts[b, k, 0] += (hm[py, px + 1] - hm[py, px - 1]).sign() * 0.25
                    pts[b, k, 1] += (hm[py + 1, px] - hm[py - 1, px]).sign() * 0.25
        return pts

    @staticmethod
    def calc_flops(x):
        """
        Rough FLOPs/MACs estimate for this block (batch size must be 1).
        """
        assert (x.shape[0] == 1)
        num_flops = x.numel() + 26 * x.shape[1]
        num_macs = 0
        return num_flops, num_macs
| 74,363
| 30.902188
| 130
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/lwopenpose_cmupan.py
|
"""
Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in PyTorch.
Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
https://arxiv.org/abs/1811.12004.
"""
__all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco',
'LwopDecoderFinalBlock']
import os
import torch
from torch import nn
from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block
class LwopResBottleneck(nn.Module):
    """
    Bottleneck block for residual path in the residual unit.

    A 1x1 reduce, a 3x3 (optionally strided) transform, and a 1x1 expand without
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bias : bool, default True
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bias=True,
                 bottleneck_factor=2,
                 squeeze_out=False):
        super(LwopResBottleneck, self).__init__()
        # Bottleneck width is derived from the output side when squeezing the output.
        if squeeze_out:
            mid_channels = out_channels // bottleneck_factor
        else:
            mid_channels = in_channels // bottleneck_factor
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=bias)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            bias=bias)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=bias,
            activation=None)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class LwopResUnit(nn.Module):
    """
    ResNet-like residual unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bias : bool, default True
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    activate : bool, default False
        Whether to activate the sum.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 bias=True,
                 bottleneck_factor=2,
                 squeeze_out=False,
                 activate=False):
        super(LwopResUnit, self).__init__()
        self.activate = activate
        # The identity branch needs a projection when shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = LwopResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bias=bias,
            bottleneck_factor=bottleneck_factor,
            squeeze_out=squeeze_out)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=bias,
                activation=None)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        x = self.body(x) + identity
        if self.activate:
            x = self.activ(x)
        return x
class LwopEncoderFinalBlock(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific encoder final block.

    A 1x1 projection, a residual stack of three depthwise-separable convolutions, and a
    final 3x3 convolution (all without batch norm).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(LwopEncoderFinalBlock, self).__init__()
        self.pre_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True,
            use_bn=False)
        self.body = nn.Sequential()
        elu = (lambda: nn.ELU(inplace=True))
        for idx in range(3):
            self.body.add_module("block{}".format(idx + 1), dwsconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                dw_use_bn=False,
                pw_use_bn=False,
                dw_activation=elu,
                pw_activation=elu))
        self.post_conv = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bias=True,
            use_bn=False)

    def forward(self, x):
        x = self.pre_conv(x)
        # Residual connection around the separable-convolution stack.
        x = x + self.body(x)
        return self.post_conv(x)
class LwopRefinementBlock(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific refinement block for decoder units.

    A 1x1 projection followed by a residual pair of 3x3 convolutions, the second one
    dilated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(LwopRefinementBlock, self).__init__()
        self.pre_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=True,
            use_bn=False)
        self.body = nn.Sequential()
        self.body.add_module("block1", conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bias=True))
        self.body.add_module("block2", conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            padding=2,
            dilation=2,
            bias=True))

    def forward(self, x):
        x = self.pre_conv(x)
        # Residual connection around the two 3x3 convolutions.
        return x + self.body(x)
class LwopDecoderBend(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific decoder bend block.

    Two 1x1 convolutions; the first without batch norm, the second a plain projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels):
        super(LwopDecoderBend, self).__init__()
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bias=True,
            use_bn=False)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            bias=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class LwopDecoderInitBlock(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific decoder init block.

    Produces the first heatmap/PAF estimates and concatenates them with the input
    features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    """
    def __init__(self,
                 in_channels,
                 keypoints):
        super(LwopDecoderInitBlock, self).__init__()
        num_heatmap = keypoints
        # Each keypoint contributes a 2-channel part-affinity field.
        num_paf = 2 * keypoints
        bend_mid_channels = 512
        self.body = nn.Sequential()
        for idx in range(3):
            self.body.add_module("block{}".format(idx + 1), conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bias=True,
                use_bn=False))
        self.heatmap_bend = LwopDecoderBend(
            in_channels=in_channels,
            mid_channels=bend_mid_channels,
            out_channels=num_heatmap)
        self.paf_bend = LwopDecoderBend(
            in_channels=in_channels,
            mid_channels=bend_mid_channels,
            out_channels=num_paf)

    def forward(self, x):
        features = self.body(x)
        heatmap = self.heatmap_bend(features)
        paf = self.paf_bend(features)
        # Keep the raw input alongside the predictions for the next decoder stage.
        return torch.cat((x, heatmap, paf), dim=1)
class LwopDecoderUnit(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific decoder unit.

    Refines the combined features + heatmap + PAF tensor and re-emits features together
    with fresh heatmap/PAF estimates.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    """
    def __init__(self,
                 in_channels,
                 keypoints):
        super(LwopDecoderUnit, self).__init__()
        num_heatmap = keypoints
        num_paf = 2 * keypoints
        # Channels that carry pure features (input minus heatmap/PAF channels).
        self.features_channels = in_channels - num_heatmap - num_paf
        self.body = nn.Sequential()
        for idx in range(5):
            self.body.add_module("block{}".format(idx + 1), LwopRefinementBlock(
                in_channels=in_channels,
                out_channels=self.features_channels))
            in_channels = self.features_channels
        self.heatmap_bend = LwopDecoderBend(
            in_channels=self.features_channels,
            mid_channels=self.features_channels,
            out_channels=num_heatmap)
        self.paf_bend = LwopDecoderBend(
            in_channels=self.features_channels,
            mid_channels=self.features_channels,
            out_channels=num_paf)

    def forward(self, x):
        # Carry the feature part of the input through unchanged.
        features = x[:, :self.features_channels]
        refined = self.body(x)
        heatmap = self.heatmap_bend(refined)
        paf = self.paf_bend(refined)
        return torch.cat((features, heatmap, paf), dim=1)
class LwopDecoderFeaturesBend(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific decoder 3D features bend.

    Two refinement blocks followed by a bend that emits the final feature channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels):
        super(LwopDecoderFeaturesBend, self).__init__()
        self.body = nn.Sequential()
        for idx in range(2):
            self.body.add_module("block{}".format(idx + 1), LwopRefinementBlock(
                in_channels=in_channels,
                out_channels=mid_channels))
            in_channels = mid_channels
        self.features_bend = LwopDecoderBend(
            in_channels=mid_channels,
            mid_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        return self.features_bend(self.body(x))
class LwopDecoderFinalBlock(nn.Module):
    """
    Lightweight OpenPose 2D/3D specific decoder final block for calculation of 3D poses.

    When 3D features are disabled, simply returns the 2D heatmap/PAF channels; otherwise
    appends refined 3D feature channels to them.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    bottleneck_factor : int
        Bottleneck factor.
    calc_3d_features : bool
        Whether to calculate 3D features.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 bottleneck_factor,
                 calc_3d_features):
        super(LwopDecoderFinalBlock, self).__init__()
        # Heatmap (1x) plus PAF (2x) channels per keypoint.
        self.num_heatmap_paf = 3 * keypoints
        self.calc_3d_features = calc_3d_features
        features_out_channels = self.num_heatmap_paf
        features_in_channels = in_channels - features_out_channels
        if self.calc_3d_features:
            self.body = nn.Sequential()
            for idx in range(5):
                self.body.add_module("block{}".format(idx + 1), LwopResUnit(
                    in_channels=in_channels,
                    out_channels=features_in_channels,
                    bottleneck_factor=bottleneck_factor))
                in_channels = features_in_channels
            self.features_bend = LwopDecoderFeaturesBend(
                in_channels=features_in_channels,
                mid_channels=features_in_channels,
                out_channels=features_out_channels)

    def forward(self, x):
        # The trailing channels of the input hold the 2D heatmap/PAF predictions.
        heatmap_paf_2d = x[:, -self.num_heatmap_paf:]
        if not self.calc_3d_features:
            return heatmap_paf_2d
        features = self.features_bend(self.body(x))
        return torch.cat((heatmap_paf_2d, features), dim=1)
class LwOpenPose(nn.Module):
    """
    Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
    https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    encoder_channels : list of list of int
        Number of output channels for each encoder unit.
    encoder_paddings : list of list of int
        Padding/dilation value for each encoder unit.
    encoder_init_block_channels : int
        Number of output channels for the encoder initial unit.
    encoder_final_block_channels : int
        Number of output channels for the encoder final unit.
    refinement_units : int
        Number of refinement blocks in the decoder.
    calc_3d_features : bool
        Whether to calculate 3D features.
    return_heatmap : bool, default True
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (368, 368)
        Spatial size of the expected input image.
    keypoints : int, default 19
        Number of keypoints.
    """
    def __init__(self,
                 encoder_channels,
                 encoder_paddings,
                 encoder_init_block_channels,
                 encoder_final_block_channels,
                 refinement_units,
                 calc_3d_features,
                 return_heatmap=True,
                 in_channels=3,
                 in_size=(368, 368),
                 keypoints=19):
        super(LwOpenPose, self).__init__()
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.calc_3d_features = calc_3d_features
        # Heatmap (1x) plus PAF (2x) channels per keypoint.
        num_heatmap_paf = 3 * keypoints

        # Encoder: MobileNet-like backbone plus a final projection block.
        self.encoder = nn.Sequential()
        backbone = nn.Sequential()
        backbone.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=encoder_init_block_channels,
            stride=2))
        in_channels = encoder_init_block_channels
        for i, channels_per_stage in enumerate(encoder_channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the start of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                padding = encoder_paddings[i][j]
                stage.add_module("unit{}".format(j + 1), dwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    padding=padding,
                    dilation=padding))
                in_channels = out_channels
            backbone.add_module("stage{}".format(i + 1), stage)
        self.encoder.add_module("backbone", backbone)
        self.encoder.add_module("final_block", LwopEncoderFinalBlock(
            in_channels=in_channels,
            out_channels=encoder_final_block_channels))
        in_channels = encoder_final_block_channels

        # Decoder: initial prediction, refinement units, and the final 2D/3D block.
        self.decoder = nn.Sequential()
        self.decoder.add_module("init_block", LwopDecoderInitBlock(
            in_channels=in_channels,
            keypoints=keypoints))
        in_channels = encoder_final_block_channels + num_heatmap_paf
        for i in range(refinement_units):
            self.decoder.add_module("unit{}".format(i + 1), LwopDecoderUnit(
                in_channels=in_channels,
                keypoints=keypoints))
        self.decoder.add_module("final_block", LwopDecoderFinalBlock(
            in_channels=in_channels,
            keypoints=keypoints,
            bottleneck_factor=2,
            calc_3d_features=calc_3d_features))

        self._init_params()

    def _init_params(self):
        """Initialize convolution weights (Kaiming uniform) and zero the biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        # Fix: the original branched on `self.return_heatmap` but returned `x` on both
        # branches, so the conditional was dead code. The flag currently has no effect
        # on the output; the attribute is kept for interface compatibility.
        return x
def get_lwopenpose(calc_3d_features,
                   keypoints,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create Lightweight OpenPose 2D/3D model with specific parameters.

    Parameters:
    ----------
    calc_3d_features : bool, default False
        Whether to calculate 3D features.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Returns:
    -------
    LwOpenPose
        The constructed network (with weights loaded if requested).
    """
    # Fixed MobileNet-style backbone configuration shared by the 2D and 3D variants.
    encoder_channels = [[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]]
    encoder_paddings = [[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]]
    encoder_init_block_channels = 32
    encoder_final_block_channels = 128
    refinement_units = 1

    net = LwOpenPose(
        encoder_channels=encoder_channels,
        encoder_paddings=encoder_paddings,
        encoder_init_block_channels=encoder_init_block_channels,
        encoder_final_block_channels=encoder_final_block_channels,
        refinement_units=refinement_units,
        calc_3d_features=calc_3d_features,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, **kwargs):
    """
    Lightweight OpenPose 2D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_lwopenpose(
        calc_3d_features=False,
        keypoints=keypoints,
        model_name="lwopenpose2d_mobilenet_cmupan_coco",
        **kwargs)
def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, **kwargs):
    """
    Lightweight OpenPose 3D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_lwopenpose(
        calc_3d_features=True,
        keypoints=keypoints,
        model_name="lwopenpose3d_mobilenet_cmupan_coco",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """
    Smoke-test the 2D and 3D model constructors: check parameter counts and output
    shapes on a random input.
    """
    in_size = (368, 368)
    keypoints = 19
    return_heatmap = True
    pretrained = False

    models = [
        (lwopenpose2d_mobilenet_cmupan_coco, "2d"),
        (lwopenpose3d_mobilenet_cmupan_coco, "3d"),
    ]

    for model, model_dim in models:

        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for the published configurations.
        assert (model != lwopenpose2d_mobilenet_cmupan_coco or weight_count == 4091698)
        assert (model != lwopenpose3d_mobilenet_cmupan_coco or weight_count == 5085983)

        batch = 1
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # y.sum().backward()
        # Output stride of the network is 8; the 3D variant doubles the channels.
        if model_dim == "2d":
            assert (tuple(y.size()) == (batch, 3 * keypoints, in_size[0] // 8, in_size[0] // 8))
        else:
            assert (tuple(y.size()) == (batch, 6 * keypoints, in_size[0] // 8, in_size[0] // 8))
if __name__ == "__main__":
_test()
| 21,152
| 31.643519
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/rir_cifar.py
|
"""
RiR for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029.
"""
__all__ = ['CIFARRiR', 'rir_cifar10', 'rir_cifar100', 'rir_svhn', 'RiRFinalBlock']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, DualPathSequential
class PostActivation(nn.Module):
    """
    Pure pre-activation block without convolution layer (BatchNorm followed by ReLU).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(PostActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class RiRUnit(nn.Module):
    """
    RiR unit: two parallel streams (residual and transient), each with a
    pass-through 3x3 convolution, plus cross 3x3 convolutions that mix the two
    streams. The residual stream also keeps an identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(RiRUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        self.res_pass_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.trans_pass_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.res_cross_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.trans_cross_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.res_postactiv = PostActivation(in_channels=out_channels)
        self.trans_postactiv = PostActivation(in_channels=out_channels)
        if self.resize_identity:
            # 1x1 projection so the identity shortcut matches the new shape.
            self.identity_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, stride=stride)

    def forward(self, x_res, x_trans):
        # Shortcut for the residual stream (projected when the shape changes).
        identity = self.identity_conv(x_res) if self.resize_identity else x_res
        # Cross convolutions read the stream *inputs*, so compute them first.
        cross_from_res = self.res_cross_conv(x_res)
        cross_from_trans = self.trans_cross_conv(x_trans)
        out_res = self.res_pass_conv(x_res) + identity + cross_from_trans
        out_trans = self.trans_pass_conv(x_trans) + cross_from_res
        return self.res_postactiv(out_res), self.trans_postactiv(out_trans)
class RiRInitBlock(nn.Module):
    """
    RiR initial block: projects the input image into the residual and transient
    streams with two independent 3x3 convolution blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(RiRInitBlock, self).__init__()
        self.res_conv = conv3x3_block(in_channels=in_channels, out_channels=out_channels)
        self.trans_conv = conv3x3_block(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x, _):
        # The second (transient) input is ignored: both streams start from x.
        return self.res_conv(x), self.trans_conv(x)
class RiRFinalBlock(nn.Module):
    """
    RiR final block: concatenates the residual and transient streams along the
    channel axis, collapsing the dual path back to a single tensor.
    """
    def __init__(self):
        super(RiRFinalBlock, self).__init__()

    def forward(self, x_res, x_trans):
        # Second return value is None so DualPathSequential can drop the pair.
        return torch.cat((x_res, x_trans), dim=1), None
class CIFARRiR(nn.Module):
    """
    RiR model for CIFAR from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARRiR, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Dual-path container: each stage consumes/produces the
        # (residual, transient) pair. return_two=False collapses the pair to a
        # single tensor at the output (RiRFinalBlock returns None for the
        # second path).
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=0,
            last_ordinals=0)
        self.features.add_module("init_block", RiRInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = DualPathSequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), RiRUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", RiRFinalBlock())
        # After concatenating the two streams the channel count is
        # final_block_channels (set by the caller; presumably twice the last
        # stage width -- confirm against get_rir_cifar).
        in_channels = final_block_channels
        self.output = nn.Sequential()
        self.output.add_module("final_conv", conv1x1_block(
            in_channels=in_channels,
            out_channels=num_classes,
            activation=None))
        # 8x8 average pool matches a 32x32 input downscaled by the two strided
        # stages (32 -> 16 -> 8).
        self.output.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self._init_params()
    def _init_params(self):
        # He-uniform initialization for every convolution; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        # Flatten (batch, num_classes, 1, 1) to (batch, num_classes).
        x = x.view(x.size(0), -1)
        return x
def get_rir_cifar(num_classes,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create RiR model for CIFAR with specific parameters.
    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Stage widths: 4 units at 48 channels, then 6 at 96, then 6 at 192.
    net = CIFARRiR(
        channels=[[48] * 4, [96] * 6, [192] * 6],
        init_block_channels=48,
        final_block_channels=384,
        num_classes=num_classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def rir_cifar10(num_classes=10, **kwargs):
    """
    Build the RiR classifier for CIFAR-10 ('Resnet in Resnet: Generalizing
    Residual Architectures,' https://arxiv.org/abs/1603.08029).
    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_rir_cifar(num_classes=num_classes, model_name="rir_cifar10", **kwargs)
def rir_cifar100(num_classes=100, **kwargs):
    """
    Build the RiR classifier for CIFAR-100 ('Resnet in Resnet: Generalizing
    Residual Architectures,' https://arxiv.org/abs/1603.08029).
    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_rir_cifar(num_classes=num_classes, model_name="rir_cifar100", **kwargs)
def rir_svhn(num_classes=10, **kwargs):
    """
    Build the RiR classifier for SVHN ('Resnet in Resnet: Generalizing
    Residual Architectures,' https://arxiv.org/abs/1603.08029).
    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_rir_cifar(num_classes=num_classes, model_name="rir_svhn", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the CIFAR/SVHN RiR models."""
    import torch

    pretrained = False
    expected_widths = {
        rir_cifar10: 9492980,
        rir_cifar100: 9527720,
        rir_svhn: 9492980,
    }

    for model, num_classes in [(rir_cifar10, 10), (rir_cifar100, 100), (rir_svhn, 10)]:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_widths[model]

        y = net(torch.randn(1, 3, 32, 32))
        y.sum().backward()
        assert tuple(y.size()) == (1, num_classes)
# Run the module smoke test when executed as a script.
if __name__ == "__main__":
    _test()
| 10,658
| 29.454286
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/unet.py
|
"""
U-Net for image segmentation, implemented in PyTorch.
Original paper: 'U-Net: Convolutional Networks for Biomedical Image Segmentation,'
https://arxiv.org/abs/1505.04597.
"""
__all__ = ['UNet', 'unet_cityscapes']
import os
import torch
import torch.nn as nn
from .common import conv1x1, conv3x3_block, InterpolationBlock, Hourglass, Identity
class UNetBlock(nn.Module):
    """
    U-Net base building block: two consecutive 3x3 convolution blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias):
        super(UNetBlock, self).__init__()
        self.conv1 = conv3x3_block(in_channels=in_channels, out_channels=out_channels, bias=bias)
        self.conv2 = conv3x3_block(in_channels=out_channels, out_channels=out_channels, bias=bias)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class UNetDownStage(nn.Module):
    """
    U-Net encoder stage: 2x max-pool followed by a double-convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias):
        super(UNetDownStage, self).__init__()
        self.pool = nn.MaxPool2d(kernel_size=2)
        self.conv = UNetBlock(in_channels=in_channels, out_channels=out_channels, bias=bias)

    def forward(self, x):
        return self.conv(self.pool(x))
class UNetUpStage(nn.Module):
    """
    U-Net decoder stage: a double-convolution block followed by a 2x bilinear
    upsample.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias):
        super(UNetUpStage, self).__init__()
        self.conv = UNetBlock(in_channels=in_channels, out_channels=out_channels, bias=bias)
        self.up = InterpolationBlock(scale_factor=2, align_corners=True)

    def forward(self, x):
        return self.up(self.conv(x))
class UNetHead(nn.Module):
    """
    U-Net output head: a double-convolution block that halves the channels,
    then a 1x1 convolution projecting to the class scores.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bias : bool
        Whether the layer uses a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias):
        super(UNetHead, self).__init__()
        mid_channels = in_channels // 2
        self.conv1 = UNetBlock(in_channels=in_channels, out_channels=mid_channels, bias=bias)
        # Final projection always carries a bias, regardless of `bias`.
        self.conv2 = conv1x1(in_channels=mid_channels, out_channels=out_channels, bias=True)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class UNet(nn.Module):
    """
    U-Net model from 'U-Net: Convolutional Networks for Biomedical Image Segmentation,'
    https://arxiv.org/abs/1505.04597.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each stage in encoder and decoder.
    init_block_channels : int
        Number of output channels for the initial unit.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 num_classes=19):
        super(UNet, self).__init__()
        # `aux` and `fixed_size` are only validated/stored; this model does not
        # produce an auxiliary output.
        assert (aux is not None)
        assert (fixed_size is not None)
        # NOTE(review): with four 2x pooling stages, divisibility by 16 may
        # actually be required; the assert only checks 8 -- confirm.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.num_classes = num_classes
        self.fixed_size = fixed_size
        bias = True
        self.stem = UNetBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bias=bias)
        in_channels = init_block_channels
        # Encoder: one pool+double-conv per stage; a parallel Identity per
        # stage lets Hourglass record the tensor used as the skip connection.
        down_seq = nn.Sequential()
        skip_seq = nn.Sequential()
        for i, out_channels in enumerate(channels[0]):
            down_seq.add_module("down{}".format(i + 1), UNetDownStage(
                in_channels=in_channels,
                out_channels=out_channels,
                bias=bias))
            in_channels = out_channels
            skip_seq.add_module("skip{}".format(i + 1), Identity())
        # Decoder: the first (deepest) stage is a bare 2x upsample; later
        # stages are double-conv + upsample with doubled input channels,
        # because Hourglass concatenates the skip tensor (merge_type="cat").
        # NOTE(review): decoder modules are named "down{}" (likely a
        # copy-paste slip); renaming would change pretrained state-dict keys.
        up_seq = nn.Sequential()
        for i, out_channels in enumerate(channels[1]):
            if i == 0:
                up_seq.add_module("down{}".format(i + 1), InterpolationBlock(
                    scale_factor=2,
                    align_corners=True))
            else:
                up_seq.add_module("down{}".format(i + 1), UNetUpStage(
                    in_channels=(2 * in_channels),
                    out_channels=out_channels,
                    bias=bias))
                in_channels = out_channels
        # Hourglass applies up_seq deepest-first, so reverse the build order.
        up_seq = up_seq[::-1]
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq,
            merge_type="cat")
        # The head sees the stem output concatenated with the decoder output.
        self.head = UNetHead(
            in_channels=(2 * in_channels),
            out_channels=num_classes,
            bias=True)
        self._init_params()
    def _init_params(self):
        # He-uniform initialization for every convolution; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.stem(x)
        x = self.hg(x)
        x = self.head(x)
        return x
def get_unet(model_name=None,
             pretrained=False,
             root=os.path.join("~", ".torch", "models"),
             **kwargs):
    """
    Create U-Net model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Encoder widths followed by decoder widths (listed deepest-first).
    net = UNet(
        channels=[[128, 256, 512, 512], [512, 256, 128, 64]],
        init_block_channels=64,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def unet_cityscapes(num_classes=19, **kwargs):
    """
    Build the U-Net segmentation model for Cityscapes ('U-Net: Convolutional
    Networks for Biomedical Image Segmentation,' https://arxiv.org/abs/1505.04597).
    Parameters:
    ----------
    num_classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_unet(num_classes=num_classes, model_name="unet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test the Cityscapes U-Net model at full resolution."""
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    for model in [unet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != unet_cityscapes or weight_count == 13396499)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # y.sum().backward()
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))
# Run the module smoke test when executed as a script.
if __name__ == "__main__":
    _test()
| 9,378
| 27.335347
| 115
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.