repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/seresnet_cifar.py
""" SE-ResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn', 'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn', 'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn', 'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn', 'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn', 'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn', 'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn', 'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential from .seresnet import SEResUnit class CIFARSEResNet(Chain): """ SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARSEResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_seresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SE-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARSEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def seresnet20_cifar10(classes=10, **kwargs): """ SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar10", **kwargs) def seresnet20_cifar100(classes=100, **kwargs): """ SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar100", **kwargs) def seresnet20_svhn(classes=10, **kwargs): """ SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_svhn", **kwargs) def seresnet56_cifar10(classes=10, **kwargs): """ SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar10", **kwargs) def seresnet56_cifar100(classes=100, **kwargs): """ SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar100", **kwargs) def seresnet56_svhn(classes=10, **kwargs): """ SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs) def seresnet110_cifar10(classes=10, **kwargs): """ SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar10", **kwargs) def seresnet110_cifar100(classes=100, **kwargs): """ SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar100", **kwargs) def seresnet110_svhn(classes=10, **kwargs): """ SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_svhn", **kwargs) def seresnet164bn_cifar10(classes=10, **kwargs): """ SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar10", **kwargs) def seresnet164bn_cifar100(classes=100, **kwargs): """ SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar100", **kwargs) def seresnet164bn_svhn(classes=10, **kwargs): """ SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_svhn", **kwargs) def seresnet272bn_cifar10(classes=10, **kwargs): """ SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar10", **kwargs) def seresnet272bn_cifar100(classes=100, **kwargs): """ SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar100", **kwargs) def seresnet272bn_svhn(classes=10, **kwargs): """ SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_svhn", **kwargs) def seresnet542bn_cifar10(classes=10, **kwargs): """ SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar10", **kwargs) def seresnet542bn_cifar100(classes=100, **kwargs): """ SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar100", **kwargs) def seresnet542bn_svhn(classes=10, **kwargs): """ SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_svhn", **kwargs) def seresnet1001_cifar10(classes=10, **kwargs): """ SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar10", **kwargs) def seresnet1001_cifar100(classes=100, **kwargs): """ SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar100", **kwargs) def seresnet1001_svhn(classes=10, **kwargs): """ SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_svhn", **kwargs) def seresnet1202_cifar10(classes=10, **kwargs): """ SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar10", **kwargs) def seresnet1202_cifar100(classes=100, **kwargs): """ SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar100", **kwargs) def seresnet1202_svhn(classes=10, **kwargs): """ SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (seresnet20_cifar10, 10), (seresnet20_cifar100, 100), (seresnet20_svhn, 10), (seresnet56_cifar10, 10), (seresnet56_cifar100, 100), (seresnet56_svhn, 10), (seresnet110_cifar10, 10), (seresnet110_cifar100, 100), (seresnet110_svhn, 10), (seresnet164bn_cifar10, 10), (seresnet164bn_cifar100, 100), (seresnet164bn_svhn, 10), (seresnet272bn_cifar10, 10), (seresnet272bn_cifar100, 100), (seresnet272bn_svhn, 10), (seresnet542bn_cifar10, 10), (seresnet542bn_cifar100, 100), (seresnet542bn_svhn, 10), (seresnet1001_cifar10, 10), (seresnet1001_cifar100, 100), (seresnet1001_svhn, 10), (seresnet1202_cifar10, 10), (seresnet1202_cifar100, 100), (seresnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet20_cifar10 or weight_count == 274847) assert (model != seresnet20_cifar100 or weight_count == 280697) assert (model != seresnet20_svhn or weight_count == 274847) assert (model != seresnet56_cifar10 or weight_count == 862889) assert (model != seresnet56_cifar100 or weight_count == 868739) assert (model != seresnet56_svhn or weight_count == 862889) assert (model != seresnet110_cifar10 or weight_count 
== 1744952) assert (model != seresnet110_cifar100 or weight_count == 1750802) assert (model != seresnet110_svhn or weight_count == 1744952) assert (model != seresnet164bn_cifar10 or weight_count == 1906258) assert (model != seresnet164bn_cifar100 or weight_count == 1929388) assert (model != seresnet164bn_svhn or weight_count == 1906258) assert (model != seresnet272bn_cifar10 or weight_count == 3153826) assert (model != seresnet272bn_cifar100 or weight_count == 3176956) assert (model != seresnet272bn_svhn or weight_count == 3153826) assert (model != seresnet542bn_cifar10 or weight_count == 6272746) assert (model != seresnet542bn_cifar100 or weight_count == 6295876) assert (model != seresnet542bn_svhn or weight_count == 6272746) assert (model != seresnet1001_cifar10 or weight_count == 11574910) assert (model != seresnet1001_cifar100 or weight_count == 11598040) assert (model != seresnet1001_svhn or weight_count == 11574910) assert (model != seresnet1202_cifar10 or weight_count == 19582226) assert (model != seresnet1202_cifar100 or weight_count == 19588076) assert (model != seresnet1202_svhn or weight_count == 19582226) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
23,331
36.391026
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnetd.py
""" ResNet(D) with dilation for ImageNet-1K, implemented in Chainer. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import MultiOutputSequential, SimpleSequential from .resnet import ResUnit, ResInitBlock from .senet import SEInitBlock class ResNetD(Chain): """ ResNet(D) with dilation model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. ordinary_init : bool, default False Whether to use original initial block or SENet one. bends : tuple of int, default None Numbers of bends for multiple output. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, ordinary_init=False, bends=None, in_channels=3, in_size=(224, 224), classes=1000): super(ResNetD, self).__init__() self.in_size = in_size self.classes = classes self.multi_output = (bends is not None) with self.init_scope(): self.features = MultiOutputSequential() with self.features.init_scope(): if ordinary_init: setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) else: init_block_channels = 2 * init_block_channels setattr(self.features, "init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1 dilate = (2 ** max(0, i - 1 - int(j == 0))) setattr(stage, "unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=dilate, dilate=dilate, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels if self.multi_output and ((i + 1) in bends): stage.do_output = True setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=(in_size[0] // 8, in_size[1] // 8))) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): outs = self.features(x) x = outs[0] x = self.output(x) if self.multi_output: return [x] + outs[1:] else: return x def get_resnetd(blocks, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNet(D) with dilation model with specific parameters. 
Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14: layers = [2, 2, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: # channels = [[int(cij * width_scale) for cij in ci] for ci in channels] channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNetD( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def 
resnetd50b(**kwargs): """ ResNet(D)-50 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnetd(blocks=50, conv1_stride=False, model_name="resnetd50b", **kwargs) def resnetd101b(**kwargs): """ ResNet(D)-101 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnetd(blocks=101, conv1_stride=False, model_name="resnetd101b", **kwargs) def resnetd152b(**kwargs): """ ResNet(D)-152 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnetd(blocks=152, conv1_stride=False, model_name="resnetd152b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False ordinary_init = False bends = None pretrained = False models = [ resnetd50b, resnetd101b, resnetd152b, ] for model in models: net = model( pretrained=pretrained, ordinary_init=ordinary_init, bends=bends) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) if ordinary_init: assert (model != resnetd50b or weight_count == 25557032) assert (model != resnetd101b or weight_count == 44549160) assert (model != resnetd152b or weight_count == 60192808) else: assert (model != resnetd50b or weight_count == 25680808) assert (model != resnetd101b or weight_count == 44672936) assert (model != resnetd152b or weight_count == 60316584) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) if bends is not None: y = y[0] assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
10,036
34.094406
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/quartznet.py
""" QuartzNet for ASR, implemented in Chainer. Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. """ __all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de', 'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru', 'quartznet15x5_ru34'] from .jasper import get_jasper def quartznet5x5_en_ls(classes=29, **kwargs): """ QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "5x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet5x5_en_ls", **kwargs) def quartznet15x5_en(classes=29, **kwargs): """ QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en", **kwargs) def quartznet15x5_en_nr(classes=29, **kwargs): """ QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en_nr", **kwargs) def quartznet15x5_fr(classes=43, **kwargs): """ QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 43 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï', 'ü', 'ÿ'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_fr", **kwargs) def quartznet15x5_de(classes=32, **kwargs): """ QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 32 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_de", **kwargs) def quartznet15x5_it(classes=39, **kwargs): """ QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_it", **kwargs) def quartznet15x5_es(classes=36, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 36 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_es", **kwargs) def quartznet15x5_ca(classes=39, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ca", **kwargs) def quartznet15x5_pl(classes=34, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń', 'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_pl", **kwargs) def quartznet15x5_ru(classes=35, **kwargs): """ QuartzNet 15x5 model for Russian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 35 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru", **kwargs) def quartznet15x5_ru34(classes=34, **kwargs): """ QuartzNet 15x5 model for Russian language (32 graphemes) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru34", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False from_audio = True audio_features = 64 models = [ quartznet5x5_en_ls, quartznet15x5_en, quartznet15x5_en_nr, quartznet15x5_fr, quartznet15x5_de, quartznet15x5_it, quartznet15x5_es, quartznet15x5_ca, quartznet15x5_pl, quartznet15x5_ru, quartznet15x5_ru34, ] for model in models: net = model( in_channels=audio_features, from_audio=from_audio, pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != quartznet5x5_en_ls or weight_count == 6713181) assert (model != quartznet15x5_en or weight_count == 18924381) assert (model != quartznet15x5_en_nr or weight_count == 18924381) assert (model != quartznet15x5_fr or weight_count == 18938731) assert (model != 
quartznet15x5_de or weight_count == 18927456) assert (model != quartznet15x5_it or weight_count == 18934631) assert (model != quartznet15x5_es or weight_count == 18931556) assert (model != quartznet15x5_ca or weight_count == 18934631) assert (model != quartznet15x5_pl or weight_count == 18929506) assert (model != quartznet15x5_ru or weight_count == 18930531) assert (model != quartznet15x5_ru34 or weight_count == 18929506) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max) x = np.random.rand(*x_shape).astype(np.float32) x_len = seq_len.astype(np.long) y, y_len = net(x, x_len) assert (y.shape[:2] == (batch, net.classes)) if from_audio: assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
13,081
42.899329
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/preresnet.py
""" PreResNet for ImageNet-1K, implemented in Chainer. Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4', 'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34', 'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152', 'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck', 'PreResUnit', 'PreResInitBlock', 'PreResActivation'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SimpleSequential class PreResBlock(Chain): """ Simple PreResNet block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, use_bias=False, use_bn=True): super(PreResBlock, self).__init__() with self.init_scope(): self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn, return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn) def __call__(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) return x, x_pre_activ class PreResBottleneck(Chain): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
stride : int or tuple/list of 2 int Stride of the convolution. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, conv1_stride): super(PreResBottleneck, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1), return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride)) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x, x_pre_activ class PreResUnit(Chain): """ PreResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, use_bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(PreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias) def __call__(self, x): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class PreResInitBlock(Chain): """ PreResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(PreResInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=2, pad=3, nobias=True) self.bn = L.BatchNormalization(size=out_channels) self.activ = F.relu self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class PreResActivation(Chain): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- in_channels : int Number of input channels. 
""" def __init__(self, in_channels): super(PreResActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class PreResNet(Chain): """ PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(PreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) 
self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_preresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = PreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def preresnet10(**kwargs): """ PreResNet-10 model from 'Identity Mappings in Deep Residual 
Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=10, model_name="preresnet10", **kwargs) def preresnet12(**kwargs): """ PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=12, model_name="preresnet12", **kwargs) def preresnet14(**kwargs): """ PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=14, model_name="preresnet14", **kwargs) def preresnetbc14b(**kwargs): """ PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs) def preresnet16(**kwargs): """ PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=16, model_name="preresnet16", **kwargs) def preresnet18_wd4(**kwargs): """ PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs) def preresnet18_wd2(**kwargs): """ PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs) def preresnet18_w3d4(**kwargs): """ PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs) def preresnet18(**kwargs): """ PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=18, model_name="preresnet18", **kwargs) def preresnet26(**kwargs): """ PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs) def preresnetbc26b(**kwargs): """ PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs) def preresnet34(**kwargs): """ PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=34, model_name="preresnet34", **kwargs) def preresnetbc38b(**kwargs): """ PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs) def preresnet50(**kwargs): """ PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, model_name="preresnet50", **kwargs) def preresnet50b(**kwargs): """ PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs) def preresnet101(**kwargs): """ PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=101, model_name="preresnet101", **kwargs) def preresnet101b(**kwargs): """ PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs) def preresnet152(**kwargs): """ PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, model_name="preresnet152", **kwargs) def preresnet152b(**kwargs): """ PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs) def preresnet200(**kwargs): """ PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=200, model_name="preresnet200", **kwargs) def preresnet200b(**kwargs): """ PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs) def preresnet269b(**kwargs): """ PreResNet-269 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ preresnet10, preresnet12, preresnet14, preresnetbc14b, preresnet16, preresnet18_wd4, preresnet18_wd2, preresnet18_w3d4, preresnet18, preresnet26, preresnetbc26b, preresnet34, preresnetbc38b, preresnet50, preresnet50b, preresnet101, preresnet101b, preresnet152, preresnet152b, preresnet200, preresnet200b, preresnet269b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet10 or weight_count == 5417128) assert (model != preresnet12 or weight_count == 5491112) assert (model != preresnet14 or weight_count == 5786536) assert (model != preresnetbc14b or weight_count == 10057384) assert (model != preresnet16 or weight_count == 6967208) assert (model != preresnet18_wd4 or weight_count == 3935960) assert (model != preresnet18_wd2 or weight_count == 5802440) assert (model != preresnet18_w3d4 or weight_count == 8473784) assert (model != preresnet18 or weight_count == 11687848) assert (model != preresnet26 or weight_count == 17958568) assert (model != preresnetbc26b or weight_count == 15987624) assert (model != preresnet34 or weight_count == 21796008) assert (model != preresnetbc38b or weight_count == 21917864) assert (model != preresnet50 or weight_count == 25549480) assert (model != 
preresnet50b or weight_count == 25549480) assert (model != preresnet101 or weight_count == 44541608) assert (model != preresnet101b or weight_count == 44541608) assert (model != preresnet152 or weight_count == 60185256) assert (model != preresnet152b or weight_count == 60185256) assert (model != preresnet200 or weight_count == 64666280) assert (model != preresnet200b or weight_count == 64666280) assert (model != preresnet269b or weight_count == 102065832) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
27,215
32.766749
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/lednet.py
""" LEDNet for image segmentation, implemented in Chainer. Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. """ __all__ = ['LEDNet', 'lednet_cityscapes'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, ConvBlock, NormActivation,\ ChannelShuffle, InterpolationBlock, Hourglass, BreakBlock, SimpleSequential class AsymConvBlock(Chain): """ Asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. ksize : int Convolution window size. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. lw_use_bn : bool, default True Whether to use BatchNorm layer (leftwise convolution block). rw_use_bn : bool, default True Whether to use BatchNorm layer (rightwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. lw_activation : function or str or None, default F.relu Activation function after the leftwise convolution block. rw_activation : function or str or None, default F.relu Activation function after the rightwise convolution block. 
""" def __init__(self, channels, ksize, pad, dilate=1, groups=1, use_bias=False, lw_use_bn=True, rw_use_bn=True, bn_eps=1e-5, lw_activation=(lambda: F.relu), rw_activation=(lambda: F.relu)): super(AsymConvBlock, self).__init__() with self.init_scope(): self.lw_conv = ConvBlock( in_channels=channels, out_channels=channels, ksize=(ksize, 1), stride=1, pad=(pad, 0), dilate=(dilate, 1), groups=groups, use_bias=use_bias, use_bn=lw_use_bn, bn_eps=bn_eps, activation=lw_activation) self.rw_conv = ConvBlock( in_channels=channels, out_channels=channels, ksize=(1, ksize), stride=1, pad=(0, pad), dilate=(1, dilate), groups=groups, use_bias=use_bias, use_bn=rw_use_bn, bn_eps=bn_eps, activation=rw_activation) def __call__(self, x): x = self.lw_conv(x) x = self.rw_conv(x) return x def asym_conv3x3_block(pad=1, **kwargs): """ 3x3 asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. lw_use_bn : bool, default True Whether to use BatchNorm layer (leftwise convolution block). rw_use_bn : bool, default True Whether to use BatchNorm layer (rightwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. lw_activation : function or str or None, default F.relu Activation function after the leftwise convolution block. rw_activation : function or str or None, default F.relu Activation function after the rightwise convolution block. """ return AsymConvBlock( ksize=3, pad=pad, **kwargs) class LEDDownBlock(Chain): """ LEDNet specific downscale block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. correct_size_mistmatch : bool Whether to correct downscaled sizes of images. 
bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, correct_size_mismatch, bn_eps): super(LEDDownBlock, self).__init__() self.correct_size_mismatch = correct_size_mismatch with self.init_scope(): self.pool = partial( F.max_pooling_2d, ksize=2, stride=2, cover_all=False) self.conv = conv3x3( in_channels=in_channels, out_channels=(out_channels - in_channels), stride=2, use_bias=True) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps) def __call__(self, x): y1 = self.pool(x) y2 = self.conv(x) if self.correct_size_mismatch: diff_h = y2.size()[2] - y1.size()[2] diff_w = y2.size()[3] - y1.size()[3] y1 = F.pad( y1, pad_width=((0, 0), (0, 0), (diff_w // 2, diff_w - diff_w // 2), (diff_h // 2, diff_h - diff_h // 2)), mode="constant", constant_values=0) x = F.concat((y2, y1), axis=1) x = self.norm_activ(x) return x class LEDBranch(Chain): """ LEDNet encoder branch. Parameters: ---------- channels : int Number of input/output channels. dilate : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, dilate, dropout_rate, bn_eps): super(LEDBranch, self).__init__() self.use_dropout = (dropout_rate != 0.0) with self.init_scope(): self.conv1 = asym_conv3x3_block( channels=channels, use_bias=True, lw_use_bn=False, bn_eps=bn_eps) self.conv2 = asym_conv3x3_block( channels=channels, pad=dilate, dilate=dilate, use_bias=True, lw_use_bn=False, bn_eps=bn_eps, rw_activation=None) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) return x class LEDUnit(Chain): """ LEDNet encoder unit (Split-Shuffle-non-bottleneck). Parameters: ---------- channels : int Number of input/output channels. 
dilate : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, dilate, dropout_rate, bn_eps): super(LEDUnit, self).__init__() mid_channels = channels // 2 with self.init_scope(): self.left_branch = LEDBranch( channels=mid_channels, dilate=dilate, dropout_rate=dropout_rate, bn_eps=bn_eps) self.right_branch = LEDBranch( channels=mid_channels, dilate=dilate, dropout_rate=dropout_rate, bn_eps=bn_eps) self.activ = F.relu self.shuffle = ChannelShuffle( channels=channels, groups=2) def __call__(self, x): identity = x x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) x1 = self.left_branch(x1) x2 = self.right_branch(x2) x = F.concat((x1, x2), axis=1) x = x + identity x = self.activ(x) x = self.shuffle(x) return x class PoolingBranch(Chain): """ Pooling branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bias : bool Whether the layer uses a bias vector. bn_eps : float Small float added to variance in Batch norm. in_size : tuple of 2 int or None Spatial size of input image. down_size : int Spatial size of downscaled image. """ def __init__(self, in_channels, out_channels, use_bias, bn_eps, in_size, down_size): super(PoolingBranch, self).__init__() self.in_size = in_size self.down_size = down_size with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, bn_eps=bn_eps) self.up = InterpolationBlock( scale_factor=None, out_size=in_size) def __call__(self, x): in_size = self.in_size if self.in_size is not None else x.shape[2:] x = F.average_pooling_2d(x, ksize=(in_size[0] // self.down_size, in_size[1] // self.down_size)) x = self.conv(x) x = self.up(x, in_size) return x class APN(Chain): """ Attention pyramid network block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. in_size : tuple of 2 int or None Spatial size of input image. """ def __init__(self, in_channels, out_channels, bn_eps, in_size): super(APN, self).__init__() self.in_size = in_size att_out_channels = 1 with self.init_scope(): self.pool_branch = PoolingBranch( in_channels=in_channels, out_channels=out_channels, use_bias=True, bn_eps=bn_eps, in_size=in_size, down_size=1) self.body = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, bn_eps=bn_eps) down_seq = SimpleSequential() with down_seq.init_scope(): setattr(down_seq, "down1", conv7x7_block( in_channels=in_channels, out_channels=att_out_channels, stride=2, use_bias=True, bn_eps=bn_eps)) setattr(down_seq, "down2", conv5x5_block( in_channels=att_out_channels, out_channels=att_out_channels, stride=2, use_bias=True, bn_eps=bn_eps)) down3_subseq = SimpleSequential() with down3_subseq.init_scope(): setattr(down3_subseq, "conv1", conv3x3_block( in_channels=att_out_channels, out_channels=att_out_channels, stride=2, use_bias=True, bn_eps=bn_eps)) setattr(down3_subseq, "conv2", conv3x3_block( in_channels=att_out_channels, out_channels=att_out_channels, use_bias=True, bn_eps=bn_eps)) setattr(down_seq, "down3", down3_subseq) up_seq = SimpleSequential() with up_seq.init_scope(): up = InterpolationBlock(scale_factor=2) setattr(up_seq, "up1", up) setattr(up_seq, "up2", up) setattr(up_seq, "up3", up) skip_seq = SimpleSequential() with skip_seq.init_scope(): setattr(skip_seq, "skip1", BreakBlock()) setattr(skip_seq, "skip2", conv7x7_block( in_channels=att_out_channels, out_channels=att_out_channels, use_bias=True, bn_eps=bn_eps)) setattr(skip_seq, "skip3", conv5x5_block( in_channels=att_out_channels, out_channels=att_out_channels, use_bias=True, bn_eps=bn_eps)) self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, 
skip_seq=skip_seq) def __call__(self, x): y = self.pool_branch(x) w = self.hg(x) x = self.body(x) x = x * w x = x + y return x class LEDNet(Chain): """ LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. Parameters: ---------- channels : list of int Number of output channels for each unit. dilates : list of int Dilations for units. dropout_rates : list of list of int Dropout rates for each unit in encoder. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, dilates, dropout_rates, correct_size_mismatch=False, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19): super(LEDNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size with self.init_scope(): self.encoder = SimpleSequential() with self.encoder.init_scope(): for i, dilates_per_stage in enumerate(dilates): out_channels = channels[i] dropout_rate = dropout_rates[i] stage = SimpleSequential() with stage.init_scope(): for j, dilate in enumerate(dilates_per_stage): if j == 0: setattr(stage, "unit{}".format(j + 1), LEDDownBlock( in_channels=in_channels, out_channels=out_channels, correct_size_mismatch=correct_size_mismatch, bn_eps=bn_eps)) in_channels = out_channels else: setattr(stage, "unit{}".format(j + 1), LEDUnit( channels=in_channels, dilate=dilate, dropout_rate=dropout_rate, bn_eps=bn_eps)) setattr(self.encoder, "stage{}".format(i + 1), stage) self.apn = APN( in_channels=in_channels, out_channels=classes, bn_eps=bn_eps, in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None) self.up = InterpolationBlock( scale_factor=8, align_corners=True) def __call__(self, x): x = self.encoder(x) x = self.apn(x) x = self.up(x) return x def get_lednet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create LEDNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [32, 64, 128] dilates = [[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]] dropout_rates = [0.03, 0.03, 0.3] bn_eps = 1e-3 net = LEDNet( channels=channels, dilates=dilates, dropout_rates=dropout_rates, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def lednet_cityscapes(classes=19, **kwargs): """ LEDNet model for Cityscapes from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_lednet(classes=classes, model_name="lednet_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False fixed_size = True correct_size_mismatch = False in_size = (1024, 2048) classes = 19 models = [ lednet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, correct_size_mismatch=correct_size_mismatch) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != lednet_cityscapes or weight_count == 922821) batch = 4 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert (y.shape == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
19,734
31.246732
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/hardnet.py
""" HarDNet for ImageNet-1K, implemented in Chainer. Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. """ __all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block, SimpleSequential class InvDwsConvBlock(Chain): """ Inverse depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. pw_activation : function or str or None, default F.relu Activation function after the pointwise convolution block. dw_activation : function or str or None, default F.relu Activation function after the depthwise convolution block. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, pw_activation=(lambda: F.relu), dw_activation=(lambda: F.relu)): super(InvDwsConvBlock, self).__init__() with self.init_scope(): self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=pw_activation) self.dw_conv = dwconv_block( in_channels=out_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=dw_activation) def __call__(self, x): x = self.pw_conv(x) x = self.dw_conv(x) return x def invdwsconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, bn_eps=1e-5, pw_activation=(lambda: F.relu), dw_activation=(lambda: F.relu)): """ 3x3 inverse depthwise separable version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. pw_activation : function or str or None, default F.relu Activation function after the pointwise convolution block. dw_activation : function or str or None, default F.relu Activation function after the depthwise convolution block. """ return InvDwsConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, bn_eps=bn_eps, pw_activation=pw_activation, dw_activation=dw_activation) class HarDUnit(Chain): """ HarDNet unit. 
Parameters: ---------- in_channels_list : list of int Number of input channels for each block. out_channels_list : list of int Number of output channels for each block. links_list : list of list of int List of indices for each layer. use_deptwise : bool Whether to use depthwise downsampling. use_dropout : bool Whether to use dropout module. downsampling : bool Whether to downsample input. activation : str Name of activation function. """ def __init__(self, in_channels_list, out_channels_list, links_list, use_deptwise, use_dropout, downsampling, activation): super(HarDUnit, self).__init__() self.links_list = links_list self.use_dropout = use_dropout self.downsampling = downsampling with self.init_scope(): self.blocks = SimpleSequential() with self.blocks.init_scope(): for i in range(len(links_list)): in_channels = in_channels_list[i] out_channels = out_channels_list[i] if use_deptwise: unit = invdwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, pw_activation=activation, dw_activation=None) else: unit = conv3x3_block( in_channels=in_channels, out_channels=out_channels) setattr(self.blocks, "block{}".format(i + 1), unit) if self.use_dropout: self.dropout = partial( F.dropout, ratio=0.1) self.conv = conv1x1_block( in_channels=in_channels_list[-1], out_channels=out_channels_list[-1], activation=activation) if self.downsampling: if use_deptwise: self.downsample = dwconv3x3_block( in_channels=out_channels_list[-1], out_channels=out_channels_list[-1], stride=2, activation=None) else: self.downsample = partial( F.max_pooling_2d, ksize=2, stride=2, cover_all=False) def __call__(self, x): layer_outs = [x] for links_i, layer_name_i in zip(self.links_list, self.blocks.layer_names): layer_i = self.blocks[layer_name_i] layer_in = [] for idx_ij in links_i: layer_in.append(layer_outs[idx_ij]) if len(layer_in) > 1: x = F.concat(layer_in, axis=1) else: x = layer_in[0] out = layer_i(x) layer_outs.append(out) outs = [] for i, layer_out_i in enumerate(layer_outs): if 
(i == len(layer_outs) - 1) or (i % 2 == 1): outs.append(layer_out_i) x = F.concat(outs, axis=1) if self.use_dropout: x = self.dropout(x) x = self.conv(x) if self.downsampling: x = self.downsample(x) return x class HarDInitBlock(Chain): """ HarDNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_deptwise : bool Whether to use depthwise downsampling. activation : str Name of activation function. """ def __init__(self, in_channels, out_channels, use_deptwise, activation): super(HarDInitBlock, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2, activation=activation) conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block self.conv2 = conv2_block_class( in_channels=mid_channels, out_channels=out_channels, activation=activation) if use_deptwise: self.downsample = dwconv3x3_block( in_channels=out_channels, out_channels=out_channels, stride=2, activation=None) else: self.downsample = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.downsample(x) return x class HarDNet(Chain): """ HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- init_block_channels : int Number of output channels for the initial unit. unit_in_channels : list of list of list of int Number of input channels for each layer in each stage. unit_out_channels : list list of of list of int Number of output channels for each layer in each stage. unit_links : list of list of list of int List of indices for each layer in each stage. use_deptwise : bool Whether to use depthwise downsampling. use_last_dropout : bool Whether to use dropouts in the last unit. output_dropout_rate : float Parameter of Dropout layer before classifier. 
Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, init_block_channels, unit_in_channels, unit_out_channels, unit_links, use_deptwise, use_last_dropout, output_dropout_rate, in_channels=3, in_size=(224, 224), classes=1000): super(HarDNet, self).__init__() self.in_size = in_size self.classes = classes activation = "relu6" with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", HarDInitBlock( in_channels=in_channels, out_channels=init_block_channels, use_deptwise=use_deptwise, activation=activation)) for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)): stage = SimpleSequential() with stage.init_scope(): for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i, out_channels_list_i)): use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and use_last_dropout) downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1)) setattr(stage, "unit{}".format(j + 1), HarDUnit( in_channels_list=in_channels_list_ij, out_channels_list=out_channels_list_ij, links_list=unit_links[i][j], use_deptwise=use_deptwise, use_dropout=use_dropout, downsampling=downsampling, activation=activation)) setattr(self.features, "stage{}".format(i + 1), stage) in_channels = unit_out_channels[-1][-1][-1] setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=output_dropout_rate)) setattr(self.output, "fc", L.Linear( in_size=in_channels, 
out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_hardnet(blocks, use_deptwise=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create HarDNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. use_deepwise : bool, default True Whether to use depthwise separable version of the model. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 39: init_block_channels = 48 growth_factor = 1.6 dropout_rate = 0.05 if use_deptwise else 0.1 layers = [4, 16, 8, 4] channels_per_layers = [96, 320, 640, 1024] growth_rates = [16, 20, 64, 160] downsamples = [1, 1, 1, 0] use_dropout = False elif blocks == 68: init_block_channels = 64 growth_factor = 1.7 dropout_rate = 0.05 if use_deptwise else 0.1 layers = [8, 16, 16, 16, 4] channels_per_layers = [128, 256, 320, 640, 1024] growth_rates = [14, 16, 20, 40, 160] downsamples = [1, 0, 1, 1, 0] use_dropout = False elif blocks == 85: init_block_channels = 96 growth_factor = 1.7 dropout_rate = 0.05 if use_deptwise else 0.2 layers = [8, 16, 16, 16, 16, 4] channels_per_layers = [192, 256, 320, 480, 720, 1280] growth_rates = [24, 24, 28, 36, 48, 256] downsamples = [1, 0, 1, 0, 1, 0] use_dropout = True else: raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks)) assert (downsamples[-1] == 0) def calc_stage_params(): def calc_unit_params(): def calc_blocks_params(layer_idx, base_channels, growth_rate): if layer_idx == 0: return base_channels, 0, [] out_channels_ij = growth_rate links_ij = [] for k in range(10): dv = 2 ** k if layer_idx % dv == 0: t = layer_idx - dv links_ij.append(t) if k > 0: out_channels_ij *= growth_factor out_channels_ij = int(int(out_channels_ij + 1) / 
2) * 2 in_channels_ij = 0 for t in links_ij: out_channels_ik, _, _ = calc_blocks_params( layer_idx=t, base_channels=base_channels, growth_rate=growth_rate) in_channels_ij += out_channels_ik return out_channels_ij, in_channels_ij, links_ij unit_out_channels = [] unit_in_channels = [] unit_links = [] for num_layers, growth_rate, base_channels, channels_per_layers_i in zip( layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers): stage_out_channels_i = 0 unit_out_channels_i = [] unit_in_channels_i = [] unit_links_i = [] for j in range(num_layers): out_channels_ij, in_channels_ij, links_ij = calc_blocks_params( layer_idx=(j + 1), base_channels=base_channels, growth_rate=growth_rate) unit_out_channels_i.append(out_channels_ij) unit_in_channels_i.append(in_channels_ij) unit_links_i.append(links_ij) if (j % 2 == 0) or (j == num_layers - 1): stage_out_channels_i += out_channels_ij unit_in_channels_i.append(stage_out_channels_i) unit_out_channels_i.append(channels_per_layers_i) unit_out_channels.append(unit_out_channels_i) unit_in_channels.append(unit_in_channels_i) unit_links.append(unit_links_i) return unit_out_channels, unit_in_channels, unit_links unit_out_channels, unit_in_channels, unit_links = calc_unit_params() stage_out_channels = [] stage_in_channels = [] stage_links = [] stage_out_channels_k = None for i in range(len(layers)): if stage_out_channels_k is None: stage_out_channels_k = [] stage_in_channels_k = [] stage_links_k = [] stage_out_channels_k.append(unit_out_channels[i]) stage_in_channels_k.append(unit_in_channels[i]) stage_links_k.append(unit_links[i]) if (downsamples[i] == 1) or (i == len(layers) - 1): stage_out_channels.append(stage_out_channels_k) stage_in_channels.append(stage_in_channels_k) stage_links.append(stage_links_k) stage_out_channels_k = None return stage_out_channels, stage_in_channels, stage_links stage_out_channels, stage_in_channels, stage_links = calc_stage_params() net = HarDNet( 
init_block_channels=init_block_channels, unit_in_channels=stage_in_channels, unit_out_channels=stage_out_channels, unit_links=stage_links, use_deptwise=use_deptwise, use_last_dropout=use_dropout, output_dropout_rate=dropout_rate, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def hardnet39ds(**kwargs): """ HarDNet-39DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_hardnet(blocks=39, use_deptwise=True, model_name="hardnet39ds", **kwargs) def hardnet68ds(**kwargs): """ HarDNet-68DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_hardnet(blocks=68, use_deptwise=True, model_name="hardnet68ds", **kwargs) def hardnet68(**kwargs): """ HarDNet-68 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_hardnet(blocks=68, use_deptwise=False, model_name="hardnet68", **kwargs) def hardnet85(**kwargs): """ HarDNet-85 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_hardnet(blocks=85, use_deptwise=False, model_name="hardnet85", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ hardnet39ds, hardnet68ds, hardnet68, hardnet85, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != hardnet39ds or weight_count == 3488228) assert (model != hardnet68ds or weight_count == 4180602) assert (model != hardnet68 or weight_count == 17565348) assert (model != hardnet85 or weight_count == 36670212) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
22,589
35.028708
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sinet.py
""" SINet for image segmentation, implemented in Chainer. Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. """ __all__ = ['SINet', 'sinet_cityscapes'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, get_activation_layer, conv1x1_block, conv3x3_block, round_channels, dwconv_block,\ Concurrent, ChannelShuffle, SimpleSequential class InterpolationBlock(Chain): """ Interpolation block. Parameters: ---------- scale_factor : int Multiplier for spatial size. out_size : tuple of 2 int, default None Spatial size of the output tensor for the bilinear upsampling operation. """ def __init__(self, scale_factor, out_size=None): super(InterpolationBlock, self).__init__() self.scale_factor = scale_factor self.out_size = out_size def __call__(self, x): out_size = self.out_size if (self.out_size is not None) else\ (x.shape[2] * self.scale_factor, x.shape[3] * self.scale_factor) return F.resize_images(x, output_shape=out_size) class SEBlock(Chain): """ SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : int Number of channels. reduction : int, default 16 Squeeze reduction value. round_mid : bool, default False Whether to round middle channel number (make divisible by 8). activation : function, or str, or nn.Module, default 'relu' Activation function after the first convolution. out_activation : function, or str, or nn.Module, default 'sigmoid' Activation function after the last convolution. 
""" def __init__(self, channels, reduction=16, round_mid=False, mid_activation=(lambda: F.relu), out_activation=(lambda: F.sigmoid)): super(SEBlock, self).__init__() self.use_conv2 = (reduction > 1) mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction) with self.init_scope(): self.fc1 = L.Linear( in_size=channels, out_size=mid_channels) if self.use_conv2: self.activ = get_activation_layer(mid_activation) self.fc2 = L.Linear( in_size=mid_channels, out_size=channels) self.sigmoid = get_activation_layer(out_activation) def __call__(self, x): w = F.average_pooling_2d(x, ksize=x.shape[2:]) w = self.fc1(w) if self.use_conv2: w = self.activ(w) w = self.fc2(w) w = self.sigmoid(w) w = F.broadcast_to(F.expand_dims(F.expand_dims(w, axis=2), axis=3), x.shape) x = x * w return x class DwsConvBlock(Chain): """ SINet version of depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. 
se_reduction : int, default 0 Squeeze reduction value (0 means no-se). """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: F.relu), pw_activation=(lambda: F.relu), se_reduction=0): super(DwsConvBlock, self).__init__() self.use_se = (se_reduction > 0) with self.init_scope(): self.dw_conv = dwconv_block( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=dw_use_bn, bn_eps=bn_eps, activation=dw_activation) if self.use_se: self.se = SEBlock( channels=in_channels, reduction=se_reduction, round_mid=False, mid_activation=(lambda: L.PReLU(shape=(in_channels // se_reduction,))), out_activation=(lambda: L.PReLU(shape=(in_channels,)))) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=pw_use_bn, bn_eps=bn_eps, activation=pw_activation) def __call__(self, x): x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) return x def dwsconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: F.relu), pw_activation=(lambda: F.relu), se_reduction=0): """ 3x3 depthwise separable version of the standard convolution block (SINet version). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). 
bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. se_reduction : int, default 0 Squeeze reduction value (0 means no-se). """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, dw_use_bn=dw_use_bn, pw_use_bn=pw_use_bn, bn_eps=bn_eps, dw_activation=dw_activation, pw_activation=pw_activation, se_reduction=se_reduction) def dwconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, bn_eps=1e-5, activation=(lambda: F.relu)): """ 3x3 depthwise version of the standard convolution block (SINet version). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, bn_eps=bn_eps, activation=activation) class FDWConvBlock(Chain): """ Factorized depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size. 
stride : int or tuple/list of 2 int Stride of the convolution. pad : int pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the each convolution block. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): super(FDWConvBlock, self).__init__() assert use_bn self.activate = (activation is not None) with self.init_scope(): self.v_conv = dwconv_block( in_channels=in_channels, out_channels=out_channels, ksize=(ksize, 1), stride=stride, pad=(pad, 0), dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=None) self.h_conv = dwconv_block( in_channels=in_channels, out_channels=out_channels, ksize=(1, ksize), stride=stride, pad=(0, pad), dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=None) if self.activate: self.act = get_activation_layer(activation) def __call__(self, x): x = self.v_conv(x) + self.h_conv(x) if self.activate: x = self.act(x) return x def fdwconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 3x3 factorized depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. 
use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return FDWConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def fdwconv5x5_block(in_channels, out_channels, stride=1, pad=2, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 5x5 factorized depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return FDWConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class SBBlock(Chain): """ SB-block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size for a factorized depthwise separable convolution block. scale_factor : int Scale factor. size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, out_channels, ksize, scale_factor, size, bn_eps): super(SBBlock, self).__init__() self.use_scale = (scale_factor > 1) with self.init_scope(): if self.use_scale: self.down_scale = partial( F.average_pooling_2d, ksize=scale_factor, stride=scale_factor) self.up_scale = InterpolationBlock( scale_factor=scale_factor, out_size=size) use_fdw = (scale_factor > 0) if use_fdw: fdwconv3x3_class = fdwconv3x3_block if ksize == 3 else fdwconv5x5_block self.conv1 = fdwconv3x3_class( in_channels=in_channels, out_channels=in_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(shape=(in_channels,)))) else: self.conv1 = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(shape=(in_channels,)))) self.conv2 = conv1x1( in_channels=in_channels, out_channels=out_channels) self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) def __call__(self, x): if self.use_scale: x = self.down_scale(x) x = self.conv1(x) x = self.conv2(x) if self.use_scale: x = self.up_scale(x) x = self.bn(x) return x class PreActivation(Chain): """ PreResNet like pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. """ def __init__(self, in_channels, bn_eps=1e-5): super(PreActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=bn_eps) self.activ = L.PReLU(shape=(in_channels,)) def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class ESPBlock(Chain): """ ESP block, which is based on the following principle: Reduce ---> Split ---> Transform --> Merge. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksizes : list of int Convolution window size for branches. scale_factors : list of int Scale factor for branches. 
use_residual : bool Whether to use residual connection. in_size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, ksizes, scale_factors, use_residual, in_size, bn_eps): super(ESPBlock, self).__init__() self.use_residual = use_residual groups = len(ksizes) mid_channels = int(out_channels / groups) res_channels = out_channels - groups * mid_channels with self.init_scope(): self.conv = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=groups) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.branches = Concurrent() with self.branches.init_scope(): for i in range(groups): out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels setattr(self.branches, "branch{}".format(i + 1), SBBlock( in_channels=mid_channels, out_channels=out_channels_i, ksize=ksizes[i], scale_factor=scale_factors[i], size=in_size, bn_eps=bn_eps)) self.preactiv = PreActivation( in_channels=out_channels, bn_eps=bn_eps) def __call__(self, x): if self.use_residual: identity = x x = self.conv(x) x = self.c_shuffle(x) x = self.branches(x) if self.use_residual: x = identity + x x = self.preactiv(x) return x class SBStage(Chain): """ SB stage. Parameters: ---------- in_channels : int Number of input channels. down_channels : int Number of output channels for a downscale block. channels_list : list of int Number of output channels for all residual block. ksizes_list : list of int Convolution window size for branches. scale_factors_list : list of int Scale factor for branches. use_residual_list : list of int List of flags for using residual in each ESP-block. se_reduction : int Squeeze reduction value (0 means no-se). in_size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, down_channels, channels_list, ksizes_list, scale_factors_list, use_residual_list, se_reduction, in_size, bn_eps): super(SBStage, self).__init__() with self.init_scope(): self.down_conv = dwsconv3x3_block( in_channels=in_channels, out_channels=down_channels, stride=2, dw_use_bn=False, bn_eps=bn_eps, dw_activation=None, pw_activation=(lambda: L.PReLU(shape=(down_channels,))), se_reduction=se_reduction) in_channels = down_channels self.main_branch = SimpleSequential() with self.main_branch.init_scope(): for i, out_channels in enumerate(channels_list): use_residual = (use_residual_list[i] == 1) ksizes = ksizes_list[i] scale_factors = scale_factors_list[i] setattr(self.main_branch, "block{}".format(i + 1), ESPBlock( in_channels=in_channels, out_channels=out_channels, ksizes=ksizes, scale_factors=scale_factors, use_residual=use_residual, in_size=((in_size[0] // 2, in_size[1] // 2) if in_size else None), bn_eps=bn_eps)) in_channels = out_channels self.preactiv = PreActivation( in_channels=(down_channels + in_channels), bn_eps=bn_eps) def __call__(self, x): x = self.down_conv(x) y = self.main_branch(x) x = F.concat((x, y), axis=1) x = self.preactiv(x) return x, y class SBEncoderInitBlock(Chain): """ SB encoder specific initial block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, mid_channels, out_channels, bn_eps): super(SBEncoderInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2, bn_eps=bn_eps, activation=(lambda: L.PReLU(shape=(mid_channels,)))) self.conv2 = dwsconv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2, dw_use_bn=False, bn_eps=bn_eps, dw_activation=None, pw_activation=(lambda: L.PReLU(shape=(out_channels,))), se_reduction=1) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class SBEncoder(Chain): """ SB encoder for SINet. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of input channels. init_block_channels : list int Number of output channels for convolutions in the initial block. down_channels_list : list of int Number of downsample channels for each residual block. channels_list : list of list of int Number of output channels for all residual block. ksizes_list : list of list of int Convolution window size for each residual block. scale_factors_list : list of list of int Scale factor for each residual block. use_residual_list : list of list of int List of flags for using residual in each residual block. in_size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, out_channels, init_block_channels, down_channels_list, channels_list, ksizes_list, scale_factors_list, use_residual_list, in_size, bn_eps): super(SBEncoder, self).__init__() with self.init_scope(): self.init_block = SBEncoderInitBlock( in_channels=in_channels, mid_channels=init_block_channels[0], out_channels=init_block_channels[1], bn_eps=bn_eps) in_channels = init_block_channels[1] self.stage1 = SBStage( in_channels=in_channels, down_channels=down_channels_list[0], channels_list=channels_list[0], ksizes_list=ksizes_list[0], scale_factors_list=scale_factors_list[0], use_residual_list=use_residual_list[0], se_reduction=1, in_size=((in_size[0] // 4, in_size[1] // 4) if in_size else None), bn_eps=bn_eps) in_channels = down_channels_list[0] + channels_list[0][-1] self.stage2 = SBStage( in_channels=in_channels, down_channels=down_channels_list[1], channels_list=channels_list[1], ksizes_list=ksizes_list[1], scale_factors_list=scale_factors_list[1], use_residual_list=use_residual_list[1], se_reduction=2, in_size=((in_size[0] // 8, in_size[1] // 8) if in_size else None), bn_eps=bn_eps) in_channels = down_channels_list[1] + channels_list[1][-1] self.output = conv1x1( in_channels=in_channels, out_channels=out_channels) def __call__(self, x): y1 = self.init_block(x) x, y2 = self.stage1(y1) x, _ = self.stage2(x) x = self.output(x) return x, y2, y1 class SBDecodeBlock(Chain): """ SB decoder block for SINet. Parameters: ---------- channels : int Number of output classes. out_size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, bn_eps, out_size, channels): super(SBDecodeBlock, self).__init__() with self.init_scope(): self.up = InterpolationBlock( scale_factor=2, out_size=out_size) self.bn = L.BatchNormalization( size=channels, eps=bn_eps) def __call__(self, x, y): x = self.up(x) x = self.bn(x) w_conf = F.softmax(x) w_max = F.broadcast_to(F.expand_dims(F.max(w_conf, axis=1), axis=1), x.shape) x = y * (1 - w_max) + x return x class SBDecoder(Chain): """ SB decoder for SINet. Parameters: ---------- dim2 : int Size of dimension #2. classes : int Number of segmentation classes. out_size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, dim2, classes, out_size, bn_eps): super(SBDecoder, self).__init__() with self.init_scope(): self.decode1 = SBDecodeBlock( channels=classes, out_size=((out_size[0] // 8, out_size[1] // 8) if out_size else None), bn_eps=bn_eps) self.decode2 = SBDecodeBlock( channels=classes, out_size=((out_size[0] // 4, out_size[1] // 4) if out_size else None), bn_eps=bn_eps) self.conv3c = conv1x1_block( in_channels=dim2, out_channels=classes, bn_eps=bn_eps, activation=(lambda: L.PReLU(shape=(classes,)))) self.output = L.Deconvolution2D( in_channels=classes, out_channels=classes, ksize=2, stride=2, pad=0, # output_pad=0, nobias=True) self.up = InterpolationBlock(scale_factor=2) def __call__(self, y3, y2, y1): y2 = self.conv3c(y2) x = self.decode1(y3, y2) x = self.decode2(x, y1) x = self.output(x) x = self.up(x) return x class SINet(Chain): """ SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. Parameters: ---------- down_channels_list : list of int Number of downsample channels for each residual block. channels_list : list of list of int Number of output channels for all residual block. 
ksizes_list : list of list of int Convolution window size for each residual block. scale_factors_list : list of list of int Scale factor for each residual block. use_residual_list : list of list of int List of flags for using residual in each residual block. dim2 : int Size of dimension #2. bn_eps : float Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 21 Number of segmentation classes. """ def __init__(self, down_channels_list, channels_list, ksizes_list, scale_factors_list, use_residual_list, dim2, bn_eps, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=21): super(SINet, self).__init__() assert (fixed_size is not None) assert (in_channels > 0) assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0)) self.in_size = in_size self.classes = classes self.aux = aux with self.init_scope(): init_block_channels = [16, classes] out_channels = classes self.encoder = SBEncoder( in_channels=in_channels, out_channels=out_channels, init_block_channels=init_block_channels, down_channels_list=down_channels_list, channels_list=channels_list, ksizes_list=ksizes_list, scale_factors_list=scale_factors_list, use_residual_list=use_residual_list, in_size=(in_size if fixed_size else None), bn_eps=bn_eps) self.decoder = SBDecoder( dim2=dim2, classes=classes, out_size=(in_size if fixed_size else None), bn_eps=bn_eps) def __call__(self, x): y3, y2, y1 = self.encoder(x) x = self.decoder(y3, y2, y1) if self.aux: return x, y3 else: return x def get_sinet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SINet model with specific parameters. 
Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ ksizes_list = [ [[3, 5], [3, 3], [3, 3]], [[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]] scale_factors_list = [ [[1, 1], [0, 1], [0, 1]], [[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]] chnn = 4 dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)] dim1 = dims[0] dim2 = dims[1] dim3 = dims[2] dim4 = dims[3] p = len(ksizes_list[0]) q = len(ksizes_list[1]) channels_list = [[dim2] * p, ([dim3] * (q // 2)) + ([dim4] * (q - q // 2))] use_residual_list = [[0] + ([1] * (p - 1)), [0] + ([1] * (q // 2 - 1)) + [0] + ([1] * (q - q // 2 - 1))] down_channels_list = [dim1, dim2] net = SINet( down_channels_list=down_channels_list, channels_list=channels_list, ksizes_list=ksizes_list, scale_factors_list=scale_factors_list, use_residual_list=use_residual_list, dim2=dims[1], **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sinet_cityscapes(classes=19, **kwargs): """ SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sinet(classes=classes, bn_eps=1e-3, model_name="sinet_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (1024, 2048) aux = False fixed_size = False pretrained = False models = [ sinet_cityscapes, ] for model in models: net = model(pretrained=pretrained, aux=aux, fixed_size=fixed_size) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sinet_cityscapes or weight_count == 119418) batch = 14 x = np.zeros((batch, 3, in_size[0], in_size[1]), np.float32) ys = net(x) y = ys[0] if aux else ys assert (y.shape == (batch, 19, in_size[0], in_size[1])) if __name__ == "__main__": _test()
36,537
31.917117
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/shufflenetv2b.py
""" ShuffleNet V2 for ImageNet-1K, implemented in Chainer. The alternative version. Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. """ __all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock,\ SimpleSequential class ShuffleUnit(Chain): """ ShuffleNetV2(b) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether do downsample. use_se : bool Whether to use SE block. use_residual : bool Whether to use residual connection. shuffle_group_first : bool Whether to use channel shuffle in group first mode. """ def __init__(self, in_channels, out_channels, downsample, use_se, use_residual, shuffle_group_first): super(ShuffleUnit, self).__init__() self.downsample = downsample self.use_se = use_se self.use_residual = use_residual mid_channels = out_channels // 2 in_channels2 = in_channels // 2 assert (in_channels % 2 == 0) y2_in_channels = (in_channels if downsample else in_channels2) y2_out_channels = out_channels - y2_in_channels with self.init_scope(): self.conv1 = conv1x1_block( in_channels=y2_in_channels, out_channels=mid_channels) self.dconv = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(2 if self.downsample else 1), activation=None) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=y2_out_channels) if self.use_se: self.se = SEBlock(channels=y2_out_channels) if downsample: self.shortcut_dconv = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, stride=2, activation=None) self.shortcut_conv = conv1x1_block( 
in_channels=in_channels, out_channels=in_channels) if shuffle_group_first: self.c_shuffle = ChannelShuffle( channels=out_channels, groups=2) else: self.c_shuffle = ChannelShuffle2( channels=out_channels, groups=2) def __call__(self, x): if self.downsample: y1 = self.shortcut_dconv(x) y1 = self.shortcut_conv(y1) x2 = x else: y1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) y2 = self.conv1(x2) y2 = self.dconv(y2) y2 = self.conv2(y2) if self.use_se: y2 = self.se(y2) if self.use_residual and not self.downsample: y2 = y2 + x2 x = F.concat((y1, y2), axis=1) x = self.c_shuffle(x) return x class ShuffleInitBlock(Chain): """ ShuffleNetV2(b) specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShuffleInitBlock, self).__init__() with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class ShuffleNetV2b(Chain): """ ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. use_se : bool, default False Whether to use SE block. use_residual : bool, default False Whether to use residual connections. shuffle_group_first : bool, default True Whether to use channel shuffle in group first mode. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. 
classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, shuffle_group_first=True, in_channels=3, in_size=(224, 224), classes=1000): super(ShuffleNetV2b, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) setattr(stage, "unit{}".format(j + 1), ShuffleUnit( in_channels=in_channels, out_channels=out_channels, downsample=downsample, use_se=use_se, use_residual=use_residual, shuffle_group_first=shuffle_group_first)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_shufflenetv2b(width_scale, shuffle_group_first=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ShuffleNetV2(b) model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. shuffle_group_first : bool, default True Whether to use channel shuffle in group first mode. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels = 24 final_block_channels = 1024 layers = [4, 8, 4] channels_per_layers = [116, 232, 464] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if width_scale > 1.5: final_block_channels = int(final_block_channels * width_scale) net = ShuffleNetV2b( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, shuffle_group_first=shuffle_group_first, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def shufflenetv2b_wd2(**kwargs): """ ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(12.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_wd2", **kwargs) def shufflenetv2b_w1(**kwargs): """ ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_shufflenetv2b( width_scale=1.0, shuffle_group_first=True, model_name="shufflenetv2b_w1", **kwargs) def shufflenetv2b_w3d2(**kwargs): """ ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(44.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_w3d2", **kwargs) def shufflenetv2b_w2(**kwargs): """ ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(61.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_w2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ shufflenetv2b_wd2, shufflenetv2b_w1, shufflenetv2b_w3d2, shufflenetv2b_w2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenetv2b_wd2 or weight_count == 1366792) assert (model != shufflenetv2b_w1 or weight_count == 2279760) assert (model != shufflenetv2b_w3d2 or weight_count == 4410194) assert (model != shufflenetv2b_w2 or weight_count == 7611290) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,881
32.115681
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sparsenet.py
""" SparseNet for ImageNet-1K, implemented in Chainer. Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. """ __all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import pre_conv1x1_block, pre_conv3x3_block, SimpleSequential from .preresnet import PreResInitBlock, PreResActivation from .densenet import TransitionBlock def sparsenet_exponential_fetch(lst): """ SparseNet's specific exponential fetch. Parameters: ---------- lst : list List of something. Returns: ------- list Filtered list. """ return [lst[len(lst) - 2**i] for i in range(1 + math.floor(math.log(len(lst), 2)))] class SparseBlock(Chain): """ SparseNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(SparseBlock, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 mid_channels = out_channels * bn_size with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=out_channels) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) return x class SparseStage(Chain): """ SparseNet stage. Parameters: ---------- in_channels : int Number of input channels. channels_per_stage : list of int Number of output channels for each unit in stage. growth_rate : int Growth rate for blocks. dropout_rate : float Parameter of Dropout layer. 
Faction of the input units to drop. do_transition : bool Whether use transition block. """ def __init__(self, in_channels, channels_per_stage, growth_rate, dropout_rate, do_transition): super(SparseStage, self).__init__() self.do_transition = do_transition with self.init_scope(): if self.do_transition: self.trans = TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2)) in_channels = in_channels // 2 self.blocks = SimpleSequential() with self.blocks.init_scope(): for i, out_channels in enumerate(channels_per_stage): setattr(self.blocks, "block{}".format(i + 1), SparseBlock( in_channels=in_channels, out_channels=growth_rate, dropout_rate=dropout_rate)) in_channels = out_channels def __call__(self, x): if self.do_transition: x = self.trans(x) outs = [x] for block_name in self.blocks.layer_names: y = self.blocks[block_name](x) outs.append(y) flt_outs = sparsenet_exponential_fetch(outs) x = F.concat(tuple(flt_outs), axis=1) return x class SparseNet(Chain): """ SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. growth_rate : int Growth rate for blocks. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, growth_rate, dropout_rate=0.0, in_channels=3, in_size=(224, 224), classes=1000): super(SparseNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SparseStage( in_channels=in_channels, channels_per_stage=channels_per_stage, growth_rate=growth_rate, dropout_rate=dropout_rate, do_transition=(i != 0)) in_channels = channels_per_stage[-1] setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_sparsenet(num_layers, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SparseNet model with specific parameters. Parameters: ---------- num_layers : int Number of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if num_layers == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif num_layers == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif num_layers == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif num_layers == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] elif num_layers == 264: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 64, 48] else: raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))], zip([growth_rate] * yi, range(yi)), [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = SparseNet( channels=channels, init_block_channels=init_block_channels, growth_rate=growth_rate, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sparsenet121(**kwargs): """ SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs) def sparsenet161(**kwargs): """ SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs) def sparsenet169(**kwargs): """ SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs) def sparsenet201(**kwargs): """ SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs) def sparsenet264(**kwargs): """ SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ sparsenet121, sparsenet161, sparsenet169, sparsenet201, sparsenet264, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sparsenet121 or weight_count == 3250824) assert (model != sparsenet161 or weight_count == 9853288) assert (model != sparsenet169 or weight_count == 4709864) assert (model != sparsenet201 or weight_count == 5703144) assert (model != sparsenet264 or weight_count == 7717224) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,001
30.751323
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/menet.py
""" MENet for ImageNet-1K, implemented in Chainer. Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. """ __all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4', 'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle, SimpleSequential class MEUnit(Chain): """ MENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. side_channels : int Number of side channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. """ def __init__(self, in_channels, out_channels, side_channels, groups, downsample, ignore_group): super(MEUnit, self).__init__() self.downsample = downsample mid_channels = out_channels // 4 if downsample: out_channels -= in_channels with self.init_scope(): # residual branch self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups)) self.compress_bn1 = L.BatchNormalization(size=mid_channels) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, stride=(2 if self.downsample else 1)) self.dw_bn2 = L.BatchNormalization(size=mid_channels) self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups) self.expand_bn3 = L.BatchNormalization(size=out_channels) if downsample: self.avgpool = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) self.activ = F.relu # fusion branch self.s_merge_conv = conv1x1( 
in_channels=mid_channels, out_channels=side_channels) self.s_merge_bn = L.BatchNormalization(size=side_channels) self.s_conv = conv3x3( in_channels=side_channels, out_channels=side_channels, stride=(2 if self.downsample else 1)) self.s_conv_bn = L.BatchNormalization(size=side_channels) self.s_evolve_conv = conv1x1( in_channels=side_channels, out_channels=mid_channels) self.s_evolve_bn = L.BatchNormalization(size=mid_channels) def __call__(self, x): identity = x # pointwise group convolution 1 x = self.compress_conv1(x) x = self.compress_bn1(x) x = self.activ(x) x = self.c_shuffle(x) # merging y = self.s_merge_conv(x) y = self.s_merge_bn(y) y = self.activ(y) # depthwise convolution (bottleneck) x = self.dw_conv2(x) x = self.dw_bn2(x) # evolution y = self.s_conv(y) y = self.s_conv_bn(y) y = self.activ(y) y = self.s_evolve_conv(y) y = self.s_evolve_bn(y) y = F.sigmoid(y) x = x * y # pointwise group convolution 2 x = self.expand_conv3(x) x = self.expand_bn3(x) # identity branch if self.downsample: identity = self.avgpool(identity) x = F.concat((x, identity), axis=1) else: x = x + identity x = self.activ(x) return x class MEInitBlock(Chain): """ MENet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(MEInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=2, pad=1, nobias=True) self.bn = L.BatchNormalization(size=out_channels) self.activ = F.relu self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class MENet(Chain): """ MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. side_channels : int Number of side channels in a ME-unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, side_channels, groups, in_channels=3, in_size=(224, 224), classes=1000): super(MENet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", MEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) ignore_group = (i == 0) and (j == 0) setattr(stage, "unit{}".format(j + 1), MEUnit( in_channels=in_channels, out_channels=out_channels, side_channels=side_channels, groups=groups, downsample=downsample, ignore_group=ignore_group)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_menet(first_stage_channels, side_channels, groups, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create MENet model with specific parameters. 
Parameters: ---------- first_stage_channels : int Number of output channels at the first stage. side_channels : int Number of side channels in a ME-unit. groups : int Number of groups in convolution layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ layers = [4, 8, 4] if first_stage_channels == 108: init_block_channels = 12 channels_per_layers = [108, 216, 432] elif first_stage_channels == 128: init_block_channels = 12 channels_per_layers = [128, 256, 512] elif first_stage_channels == 160: init_block_channels = 16 channels_per_layers = [160, 320, 640] elif first_stage_channels == 228: init_block_channels = 24 channels_per_layers = [228, 456, 912] elif first_stage_channels == 256: init_block_channels = 24 channels_per_layers = [256, 512, 1024] elif first_stage_channels == 348: init_block_channels = 24 channels_per_layers = [348, 696, 1392] elif first_stage_channels == 352: init_block_channels = 24 channels_per_layers = [352, 704, 1408] elif first_stage_channels == 456: init_block_channels = 48 channels_per_layers = [456, 912, 1824] else: raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = MENet( channels=channels, init_block_channels=init_block_channels, side_channels=side_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def menet108_8x1_g3(**kwargs): """ 108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks 
for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs) def menet128_8x1_g4(**kwargs): """ 128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs) def menet160_8x1_g8(**kwargs): """ 160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs) def menet228_12x1_g3(**kwargs): """ 228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs) def menet256_12x1_g4(**kwargs): """ 256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs) def menet348_12x1_g3(**kwargs): """ 348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs) def menet352_12x1_g8(**kwargs): """ 352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs) def menet456_24x1_g3(**kwargs): """ 456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = True models = [ menet108_8x1_g3, menet128_8x1_g4, # menet160_8x1_g8, menet228_12x1_g3, menet256_12x1_g4, menet348_12x1_g3, menet352_12x1_g8, menet456_24x1_g3, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != menet108_8x1_g3 or weight_count == 654516) assert (model != menet128_8x1_g4 or weight_count == 750796) assert (model != menet160_8x1_g8 or weight_count == 850120) assert (model != menet228_12x1_g3 or weight_count == 1806568) assert (model != menet256_12x1_g4 or weight_count == 1888240) assert (model != menet348_12x1_g3 or weight_count == 3368128) assert (model != menet352_12x1_g8 or weight_count == 2272872) assert (model != menet456_24x1_g3 or weight_count == 5304784) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
16,444
33.118257
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/voca.py
""" VOCA for speech-driven facial animation, implemented in Chainer. Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079. """ __all__ = ['VOCA', 'voca8flame'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from chainer.serializers import load_npz from .common import ConvBlock, SimpleSequential class VocaEncoder(Chain): """ VOCA encoder. Parameters: ---------- audio_features : int Number of audio features (characters/sounds). audio_window_size : int Size of audio window (for time related audio features). base_persons : int Number of base persons (subjects). encoder_features : int Number of encoder features. """ def __init__(self, audio_features, audio_window_size, base_persons, encoder_features): super(VocaEncoder, self).__init__() self.audio_window_size = audio_window_size channels = (32, 32, 64, 64) fc1_channels = 128 with self.init_scope(): self.bn = L.BatchNormalization( size=1, eps=1e-5) in_channels = audio_features + base_persons self.branch = SimpleSequential() with self.branch.init_scope(): for i, out_channels in enumerate(channels): setattr(self.branch, "conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=(3, 1), stride=(2, 1), pad=(1, 0), use_bias=True, use_bn=False)) in_channels = out_channels in_channels += base_persons self.fc1 = L.Linear( in_size=in_channels, out_size=fc1_channels) self.fc2 = L.Linear( in_size=fc1_channels, out_size=encoder_features) def __call__(self, x, pid): x = self.bn(x) x = F.swapaxes(x, axis1=1, axis2=3) y = F.expand_dims(F.expand_dims(pid, axis=-1), axis=-1) y = F.tile(y, reps=(1, 1, self.audio_window_size, 1)) x = F.concat((x, y), axis=1) x = self.branch(x) x = F.reshape(x, shape=(x.shape[0], -1)) x = F.concat((x, pid), axis=1) x = self.fc1(x) x = F.tanh(x) x = self.fc2(x) return x class VOCA(Chain): """ VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' 
https://arxiv.org/abs/1905.03079. Parameters: ---------- audio_features : int, default 29 Number of audio features (characters/sounds). audio_window_size : int, default 16 Size of audio window (for time related audio features). base_persons : int, default 8 Number of base persons (subjects). encoder_features : int, default 50 Number of encoder features. vertices : int, default 5023 Number of 3D geometry vertices. """ def __init__(self, audio_features=29, audio_window_size=16, base_persons=8, encoder_features=50, vertices=5023): super(VOCA, self).__init__() self.base_persons = base_persons with self.init_scope(): self.encoder = VocaEncoder( audio_features=audio_features, audio_window_size=audio_window_size, base_persons=base_persons, encoder_features=encoder_features) self.decoder = L.Linear( in_size=encoder_features, out_size=(3 * vertices)) def __call__(self, x, pid): pid = self.xp.eye(self.base_persons, dtype=pid.dtype)[pid.astype("int32")] x = self.encoder(x, pid) x = self.decoder(x) x = F.reshape(x, shape=(x.shape[0], 1, -1, 3)) return x def get_voca(base_persons, vertices, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create VOCA model with specific parameters. Parameters: ---------- base_persons : int Number of base persons (subjects). vertices : int Number of 3D geometry vertices. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" net = VOCA( base_persons=base_persons, vertices=vertices, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def voca8flame(**kwargs): """ VOCA-8-FLAME model for 8 base persons and FLAME topology from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_voca(base_persons=8, vertices=5023, model_name="voca8flame", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ voca8flame, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != voca8flame or weight_count == 809563) batch = 14 audio_features = 29 audio_window_size = 16 vertices = 5023 x = np.random.rand(batch, 1, audio_window_size, audio_features).astype(np.float32) pid = np.full(shape=(batch,), fill_value=3, dtype=np.float32) y = net(x, pid) assert (y.shape == (batch, 1, vertices, 3)) if __name__ == "__main__": _test()
6,615
29.915888
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/shakeshakeresnet_cifar.py
""" Shake-Shake-ResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. """ __all__ = ['CIFARShakeShakeResNet', 'shakeshakeresnet20_2x16d_cifar10', 'shakeshakeresnet20_2x16d_cifar100', 'shakeshakeresnet20_2x16d_svhn', 'shakeshakeresnet26_2x32d_cifar10', 'shakeshakeresnet26_2x32d_cifar100', 'shakeshakeresnet26_2x32d_svhn'] import os import chainer from chainer import backend import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3_block, SimpleSequential from .resnet import ResBlock, ResBottleneck class ShakeShake(chainer.function.Function): """ Shake-Shake function. """ def forward(self, inputs): x1, x2 = inputs if chainer.config.train: xp = backend.get_array_module(x1) alpha = xp.empty((x1.shape[0], 1, 1, 1), dtype=x1.dtype) for i in range(len(alpha)): alpha[i] = xp.random.rand() return alpha * x1 + (1 - alpha) * x2, else: return 0.5 * (x1 + x2), def backward(self, inputs, grad_outputs): dy, = grad_outputs xp = backend.get_array_module(dy) beta = xp.empty((dy.shape[0], 1, 1, 1), dtype=dy.dtype) for i in range(len(beta)): beta[i] = xp.random.rand() return beta * dy, (xp.ones(dy.shape, dtype=dy.dtype) - beta) * dy class ShakeShakeShortcut(Chain): """ Shake-Shake-ResNet shortcut. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. 
""" def __init__(self, in_channels, out_channels, stride): super(ShakeShakeShortcut, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 with self.init_scope(): self.pool = partial( F.average_pooling_2d, ksize=1, stride=stride) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) def __call__(self, x): x1 = self.pool(x) x1 = self.conv1(x1) x2 = x[:, :, :-1, :-1] x2 = F.pad(x2, pad_width=((0, 0), (0, 0), (1, 0), (1, 0)), mode="constant", constant_values=0) x2 = self.pool(x2) x2 = self.conv2(x2) x = F.concat((x1, x2), axis=1) x = self.bn(x) return x class ShakeShakeResUnit(Chain): """ Shake-Shake-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck): super(ShakeShakeResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) branch_class = ResBottleneck if bottleneck else ResBlock with self.init_scope(): self.branch1 = branch_class( in_channels=in_channels, out_channels=out_channels, stride=stride) self.branch2 = branch_class( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_branch = ShakeShakeShortcut( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_branch(x) else: identity = x x1 = self.branch1(x) x2 = self.branch2(x) x = ShakeShake()(x1, x2) + identity x = self.activ(x) return x class CIFARShakeShakeResNet(Chain): """ Shake-Shake-ResNet model for CIFAR from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARShakeShakeResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ShakeShakeResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_shakeshakeresnet_cifar(classes, blocks, bottleneck, first_stage_channels=16, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create Shake-Shake-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. first_stage_channels : int, default 16 Number of output channels for the first stage. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 from functools import reduce channels_per_layers = reduce(lambda x, y: x + [x[-1] * 2], range(2), [first_stage_channels]) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARShakeShakeResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def shakeshakeresnet20_2x16d_cifar10(classes=10, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_cifar10", **kwargs) def shakeshakeresnet20_2x16d_cifar100(classes=100, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_cifar100", **kwargs) def shakeshakeresnet20_2x16d_svhn(classes=10, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_svhn", **kwargs) def shakeshakeresnet26_2x32d_cifar10(classes=10, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_cifar10", **kwargs) def shakeshakeresnet26_2x32d_cifar100(classes=100, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_cifar100", **kwargs) def shakeshakeresnet26_2x32d_svhn(classes=10, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (shakeshakeresnet20_2x16d_cifar10, 10), (shakeshakeresnet20_2x16d_cifar100, 100), (shakeshakeresnet20_2x16d_svhn, 10), (shakeshakeresnet26_2x32d_cifar10, 10), (shakeshakeresnet26_2x32d_cifar100, 100), (shakeshakeresnet26_2x32d_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != shakeshakeresnet20_2x16d_cifar10 or weight_count == 541082) assert (model != shakeshakeresnet20_2x16d_cifar100 or weight_count == 546932) assert (model != shakeshakeresnet20_2x16d_svhn or weight_count == 541082) assert (model != shakeshakeresnet26_2x32d_cifar10 or weight_count == 2923162) assert (model != shakeshakeresnet26_2x32d_cifar100 or weight_count == 2934772) assert (model != shakeshakeresnet26_2x32d_svhn or weight_count == 2923162) x = np.zeros((14, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (14, classes)) if __name__ == "__main__": _test()
14,965
34.548694
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/wrn_cifar.py
""" WRN for CIFAR/SVHN, implemented in Chainer. Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. """ __all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10', 'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, SimpleSequential from .preresnet import PreResUnit, PreResActivation class CIFARWRN(Chain): """ WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(32, 32), classes=10): super(CIFARWRN, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=False, conv1_stride=False)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_wrn_cifar(classes, blocks, width_factor, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create WRN model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. width_factor : int Wide scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARWRN( channels=channels, init_block_channels=init_block_channels, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def wrn16_10_cifar10(classes=10, **kwargs): """ WRN-16-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar10", **kwargs) def wrn16_10_cifar100(classes=100, **kwargs): """ WRN-16-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar100", **kwargs) def wrn16_10_svhn(classes=10, **kwargs): """ WRN-16-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_svhn", **kwargs) def wrn28_10_cifar10(classes=10, **kwargs): """ WRN-28-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar10", **kwargs) def wrn28_10_cifar100(classes=100, **kwargs): """ WRN-28-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar100", **kwargs) def wrn28_10_svhn(classes=10, **kwargs): """ WRN-28-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_svhn", **kwargs) def wrn40_8_cifar10(classes=10, **kwargs): """ WRN-40-8 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar10", **kwargs) def wrn40_8_cifar100(classes=100, **kwargs): """ WRN-40-8 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar100", **kwargs) def wrn40_8_svhn(classes=10, **kwargs): """ WRN-40-8 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (wrn16_10_cifar10, 10), (wrn16_10_cifar100, 100), (wrn16_10_svhn, 10), (wrn28_10_cifar10, 10), (wrn28_10_cifar100, 100), (wrn28_10_svhn, 10), (wrn40_8_cifar10, 10), (wrn40_8_cifar100, 100), (wrn40_8_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn16_10_cifar10 or weight_count == 17116634) assert (model != wrn16_10_cifar100 or weight_count == 17174324) assert (model != wrn16_10_svhn or weight_count == 17116634) assert (model != wrn28_10_cifar10 or weight_count == 36479194) assert (model != wrn28_10_cifar100 or weight_count == 36536884) assert (model != wrn28_10_svhn or weight_count == 36479194) assert (model != wrn40_8_cifar10 or weight_count == 35748314) assert (model != wrn40_8_cifar100 or weight_count == 35794484) assert (model != wrn40_8_svhn or weight_count == 35748314) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
11,367
33.871166
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/inceptionresnetv2.py
""" InceptionResNetV2 for ImageNet-1K, implemented in Chainer. Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. """ __all__ = ['InceptionResNetV2', 'inceptionresnetv2'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SimpleSequential, Concurrent from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit class InceptBlock5b(Chain): """ InceptionResNetV2 type Mixed-5b block. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptBlock5b, self).__init__() in_channels = 192 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", Conv1x1Branch( in_channels=in_channels, out_channels=96, bn_eps=bn_eps)) setattr(self.branches, "branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(48, 64), kernel_size_list=(1, 5), strides_list=(1, 1), padding_list=(0, 2), bn_eps=bn_eps)) setattr(self.branches, "branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps)) setattr(self.branches, "branch4", AvgPoolBranch( in_channels=in_channels, out_channels=64, bn_eps=bn_eps, count_include_pad=False)) def __call__(self, x): x = self.branches(x) return x class InceptInitBlock(Chain): """ InceptionResNetV2 specific initial block. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, bn_eps): super(InceptInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, pad=0, bn_eps=bn_eps) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, stride=1, pad=0, bn_eps=bn_eps) self.conv3 = conv3x3_block( in_channels=32, out_channels=64, stride=1, pad=1, bn_eps=bn_eps) self.pool1 = partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=False) self.conv4 = conv1x1_block( in_channels=64, out_channels=80, stride=1, pad=0, bn_eps=bn_eps) self.conv5 = conv3x3_block( in_channels=80, out_channels=192, stride=1, pad=0, bn_eps=bn_eps) self.pool2 = partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=False) self.block = InceptBlock5b(bn_eps=bn_eps) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool1(x) x = self.conv4(x) x = self.conv5(x) x = self.pool2(x) x = self.block(x) return x class InceptionResNetV2(Chain): """ InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, dropout_rate=0.0, bn_eps=1e-5, in_channels=3, in_size=(299, 299), classes=1000): super(InceptionResNetV2, self).__init__() self.in_size = in_size self.classes = classes layers = [10, 21, 11] in_channels_list = [320, 1088, 2080] normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]] reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]] normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", InceptInitBlock( in_channels=in_channels, bn_eps=bn_eps)) in_channels = in_channels_list[0] for i, layers_per_stage in enumerate(layers): stage = SimpleSequential() with stage.init_scope(): for j in range(layers_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] out_channels_list_per_stage = reduction_out_channels_list[i - 1] else: unit = normal_units[i] out_channels_list_per_stage = normal_out_channels_list[i] if (i == len(layers) - 1) and (j == layers_per_stage - 1): unit_kwargs = {"scale": 1.0, "activate": False} else: unit_kwargs = {} setattr(stage, "unit{}".format(j + 1), unit( in_channels=in_channels, out_channels_list=out_channels_list_per_stage, bn_eps=bn_eps, **unit_kwargs)) if (j == 0) and (i != 0): in_channels = in_channels_list[i] setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'final_conv', conv1x1_block( in_channels=2080, out_channels=1536, bn_eps=bn_eps)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) in_channels = 1536 self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) if dropout_rate > 0.0: setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, "fc", L.Linear( 
in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_inceptionresnetv2(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create InceptionResNetV2 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ net = InceptionResNetV2(**kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def inceptionresnetv2(**kwargs): """ InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_eps=1e-3, **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ inceptionresnetv2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != inceptionresnetv2 or weight_count == 55843464) x = np.zeros((1, 3, 299, 299), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
10,482
33.59736
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/ghostnet.py
""" GhostNet for ImageNet-1K, implemented in Chainer. Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. """ __all__ = ['GhostNet', 'ghostnet'] import os import math import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\ dwsconv3x3_block, SEBlock, SimpleSequential class GhostHSigmoid(Chain): """ Approximated sigmoid function, specific for GhostNet. """ def __call__(self, x): return F.clip(x, x_min=0.0, x_max=1.0) class GhostConvBlock(Chain): """ GhostNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None, default F.relu Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, activation=(lambda: F.relu)): super(GhostConvBlock, self).__init__() main_out_channels = math.ceil(0.5 * out_channels) cheap_out_channels = out_channels - main_out_channels with self.init_scope(): self.main_conv = conv1x1_block( in_channels=in_channels, out_channels=main_out_channels, activation=activation) self.cheap_conv = dwconv3x3_block( in_channels=main_out_channels, out_channels=cheap_out_channels, activation=activation) def __call__(self, x): x = self.main_conv(x) y = self.cheap_conv(x) return F.concat((x, y), axis=1) class GhostExpBlock(Chain): """ GhostNet expansion block for residual path in GhostNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : float Expansion factor. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, stride, use_kernel3, exp_factor, use_se): super(GhostExpBlock, self).__init__() self.use_dw_conv = (stride != 1) self.use_se = use_se mid_channels = int(math.ceil(exp_factor * in_channels)) with self.init_scope(): self.exp_conv = GhostConvBlock( in_channels=in_channels, out_channels=mid_channels) if self.use_dw_conv: dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block self.dw_conv = dw_conv_class( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=None) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, out_activation=GhostHSigmoid()) self.pw_conv = GhostConvBlock( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.exp_conv(x) if self.use_dw_conv: x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) return x class GhostUnit(Chain): """ GhostNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : float Expansion factor. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, stride, use_kernel3, exp_factor, use_se): super(GhostUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = GhostExpBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, use_se=use_se) if self.resize_identity: self.identity_conv = dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, pw_activation=None) def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity return x class GhostClassifier(Chain): """ GhostNet classifier. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(GhostClassifier, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class GhostNet(Chain): """ GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, exp_factors, use_se, first_stride, in_channels=3, in_size=(224, 224), classes=1000): super(GhostNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] use_se_flag = use_se[i][j] == 1 setattr(stage, "unit{}".format(j + 1), GhostUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, use_se=use_se_flag)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", GhostClassifier( in_channels=in_channels, out_channels=classes, mid_channels=classifier_mid_channels)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_ghostnet(width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create GhostNet model with specific parameters. Parameters: ---------- width_scale : float, default 1.0 Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels = 16 channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]] kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]] exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]] use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]] final_block_channels = 960 classifier_mid_channels = 1280 first_stride = False if width_scale != 1.0: channels = [[round_channels(cij * width_scale, divisor=4) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale, divisor=4) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale, divisor=4) net = GhostNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, exp_factors=exp_factors, use_se=use_se, first_stride=first_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def ghostnet(**kwargs): """ GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_ghostnet(model_name="ghostnet", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ ghostnet, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != ghostnet or weight_count == 5180840) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,315
31.79803
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/efficientnet.py
""" EfficientNet for ImageNet-1K, implemented in Chainer. Original papers: - 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946, - 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665. """ __all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b', 'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b', 'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c', 'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\ GlobalAvgPool2D, SimpleSequential def calc_tf_padding(x, kernel_size, stride=1, dilation=1): """ Calculate TF-same like padding size. Parameters: ---------- x : tensor Input tensor. kernel_size : int Convolution window size. stride : int, default 1 Strides of the convolution. dilation : int, default 1 Dilation value for convolution layer. Returns: ------- tuple of 4 int The size of the padding. 
""" height, width = x.shape[2:] oh = math.ceil(height / stride) ow = math.ceil(width / stride) pad_h = max((oh - 1) * stride + (kernel_size - 1) * dilation + 1 - height, 0) pad_w = max((ow - 1) * stride + (kernel_size - 1) * dilation + 1 - width, 0) return (0, 0), (0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2) class EffiDwsConvUnit(Chain): """ EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. tf_mode : bool Whether to use TF-like mode. """ def __init__(self, in_channels, out_channels, stride, bn_eps, activation, tf_mode): super(EffiDwsConvUnit, self).__init__() self.tf_mode = tf_mode self.residual = (in_channels == out_channels) and (stride == 1) with self.init_scope(): self.dw_conv = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, pad=(0 if tf_mode else 1), bn_eps=bn_eps, activation=activation) self.se = SEBlock( channels=in_channels, reduction=4, mid_activation=activation) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def __call__(self, x): if self.residual: identity = x if self.tf_mode: x = F.pad(x, pad_width=calc_tf_padding(x, kernel_size=3), mode="constant", constant_values=0) x = self.dw_conv(x) x = self.se(x) x = self.pw_conv(x) if self.residual: x = x + identity return x class EffiInvResUnit(Chain): """ EfficientNet inverted residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. 
stride : int or tuple/list of 2 int Stride of the second convolution layer. exp_factor : int Factor for expansion of channels. se_factor : int SE reduction factor for each unit. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. tf_mode : bool Whether to use TF-like mode. """ def __init__(self, in_channels, out_channels, kernel_size, stride, exp_factor, se_factor, bn_eps, activation, tf_mode): super(EffiInvResUnit, self).__init__() self.kernel_size = kernel_size self.stride = stride self.tf_mode = tf_mode self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = se_factor > 0 mid_channels = in_channels * exp_factor dwconv_block_fn = dwconv3x3_block if kernel_size == 3 else (dwconv5x5_block if kernel_size == 5 else None) with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation) self.conv2 = dwconv_block_fn( in_channels=mid_channels, out_channels=mid_channels, stride=stride, pad=(0 if tf_mode else (kernel_size // 2)), bn_eps=bn_eps, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), mid_activation=activation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def __call__(self, x): if self.residual: identity = x x = self.conv1(x) if self.tf_mode: x = F.pad(x, pad_width=calc_tf_padding(x, kernel_size=self.kernel_size, stride=self.stride), mode="constant", constant_values=0) x = self.conv2(x) if self.use_se: x = self.se(x) x = self.conv3(x) if self.residual: x = x + identity return x class EffiInitBlock(Chain): """ EfficientNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. 
tf_mode : bool Whether to use TF-like mode. """ def __init__(self, in_channels, out_channels, bn_eps, activation, tf_mode): super(EffiInitBlock, self).__init__() self.tf_mode = tf_mode with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, pad=(0 if tf_mode else 1), bn_eps=bn_eps, activation=activation) def __call__(self, x): if self.tf_mode: x = F.pad(x, pad_width=calc_tf_padding(x, kernel_size=3, stride=2), mode="constant", constant_values=0) x = self.conv(x) return x class EfficientNet(Chain): """ EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. ksizes : list of list of int Number of kernel sizes for each unit. strides_per_stage : list int Stride value for the first unit of each stage. expansion_factors : list of list of int Number of expansion factors for each unit. dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, ksizes, strides_per_stage, expansion_factors, dropout_rate=0.2, tf_mode=False, bn_eps=1e-5, in_channels=3, in_size=(224, 224), classes=1000): super(EfficientNet, self).__init__() self.in_size = in_size self.classes = classes activation = "swish" with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", EffiInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): ksizes_per_stage = ksizes[i] expansion_factors_per_stage = expansion_factors[i] stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): ksize = ksizes_per_stage[j] expansion_factor = expansion_factors_per_stage[j] stride = strides_per_stage[i] if (j == 0) else 1 if i == 0: setattr(stage, "unit{}".format(j + 1), EffiDwsConvUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) else: setattr(stage, "unit{}".format(j + 1), EffiInvResUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=ksize, stride=stride, exp_factor=expansion_factor, se_factor=4, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, activation=activation)) in_channels = final_block_channels setattr(self.features, "final_pool", GlobalAvgPool2D()) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) if dropout_rate > 0.0: setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, 
"fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_efficientnet(version, in_size, tf_mode=False, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create EfficientNet model with specific parameters. Parameters: ---------- version : str Version of EfficientNet ('b0'...'b7'). in_size : tuple of two ints Spatial size of the expected input image. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "b0": assert (in_size == (224, 224)) depth_factor = 1.0 width_factor = 1.0 dropout_rate = 0.2 elif version == "b1": assert (in_size == (240, 240)) depth_factor = 1.1 width_factor = 1.0 dropout_rate = 0.2 elif version == "b2": assert (in_size == (260, 260)) depth_factor = 1.2 width_factor = 1.1 dropout_rate = 0.3 elif version == "b3": assert (in_size == (300, 300)) depth_factor = 1.4 width_factor = 1.2 dropout_rate = 0.3 elif version == "b4": assert (in_size == (380, 380)) depth_factor = 1.8 width_factor = 1.4 dropout_rate = 0.4 elif version == "b5": assert (in_size == (456, 456)) depth_factor = 2.2 width_factor = 1.6 dropout_rate = 0.4 elif version == "b6": assert (in_size == (528, 528)) depth_factor = 2.6 width_factor = 1.8 dropout_rate = 0.5 elif version == "b7": assert (in_size == (600, 600)) depth_factor = 3.1 width_factor = 2.0 dropout_rate = 0.5 elif version == "b8": assert (in_size == (672, 672)) depth_factor = 3.6 width_factor = 2.2 dropout_rate = 0.5 else: raise ValueError("Unsupported EfficientNet version {}".format(version)) init_block_channels = 32 layers = [1, 2, 2, 3, 3, 4, 1] 
downsample = [1, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 40, 80, 112, 192, 320] expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6] kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3] strides_per_stage = [1, 2, 2, 2, 1, 2, 1] final_block_channels = 1280 layers = [int(math.ceil(li * depth_factor)) for li in layers] channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), []) ksizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(kernel_sizes_per_layers, layers, downsample), []) expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(expansion_factors_per_layers, layers, downsample), []) strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(strides_per_stage, layers, downsample), []) strides_per_stage = [si[0] for si in strides_per_stage] init_block_channels = round_channels(init_block_channels * width_factor) if width_factor > 1.0: assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor)) final_block_channels = round_channels(final_block_channels * width_factor) net = EfficientNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, ksizes=ksizes, strides_per_stage=strides_per_stage, expansion_factors=expansion_factors, dropout_rate=dropout_rate, tf_mode=tf_mode, bn_eps=bn_eps, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), 
obj=net) return net def efficientnet_b0(in_size=(224, 224), **kwargs): """ EfficientNet-B0 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b0", in_size=in_size, model_name="efficientnet_b0", **kwargs) def efficientnet_b1(in_size=(240, 240), **kwargs): """ EfficientNet-B1 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, model_name="efficientnet_b1", **kwargs) def efficientnet_b2(in_size=(260, 260), **kwargs): """ EfficientNet-B2 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, model_name="efficientnet_b2", **kwargs) def efficientnet_b3(in_size=(300, 300), **kwargs): """ EfficientNet-B3 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. 
Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, model_name="efficientnet_b3", **kwargs) def efficientnet_b4(in_size=(380, 380), **kwargs): """ EfficientNet-B4 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, model_name="efficientnet_b4", **kwargs) def efficientnet_b5(in_size=(456, 456), **kwargs): """ EfficientNet-B5 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, model_name="efficientnet_b5", **kwargs) def efficientnet_b6(in_size=(528, 528), **kwargs): """ EfficientNet-B6 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b6", in_size=in_size, model_name="efficientnet_b6", **kwargs) def efficientnet_b7(in_size=(600, 600), **kwargs): """ EfficientNet-B7 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, model_name="efficientnet_b7", **kwargs) def efficientnet_b8(in_size=(672, 672), **kwargs): """ EfficientNet-B8 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (672, 672) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b8", in_size=in_size, model_name="efficientnet_b8", **kwargs) def efficientnet_b0b(in_size=(224, 224), **kwargs): """ EfficientNet-B0-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b0b", **kwargs) def efficientnet_b1b(in_size=(240, 240), **kwargs): """ EfficientNet-B1-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b1b", **kwargs) def efficientnet_b2b(in_size=(260, 260), **kwargs): """ EfficientNet-B2-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b2b", **kwargs) def efficientnet_b3b(in_size=(300, 300), **kwargs): """ EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b3b", **kwargs) def efficientnet_b4b(in_size=(380, 380), **kwargs): """ EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b4b", **kwargs) def efficientnet_b5b(in_size=(456, 456), **kwargs): """ EfficientNet-B5-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b5b", **kwargs) def efficientnet_b6b(in_size=(528, 528), **kwargs): """ EfficientNet-B6-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b6b", **kwargs) def efficientnet_b7b(in_size=(600, 600), **kwargs): """ EfficientNet-B7-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b7b", **kwargs) def efficientnet_b0c(in_size=(224, 224), **kwargs): """ EfficientNet-B0-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b0c", **kwargs) def efficientnet_b1c(in_size=(240, 240), **kwargs): """ EfficientNet-B1-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b1c", **kwargs) def efficientnet_b2c(in_size=(260, 260), **kwargs): """ EfficientNet-B2-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b2c", **kwargs) def efficientnet_b3c(in_size=(300, 300), **kwargs): """ EfficientNet-B3-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b3c", **kwargs) def efficientnet_b4c(in_size=(380, 380), **kwargs): """ EfficientNet-B4-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b4c", **kwargs) def efficientnet_b5c(in_size=(456, 456), **kwargs): """ EfficientNet-B5-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b5c", **kwargs) def efficientnet_b6c(in_size=(528, 528), **kwargs): """ EfficientNet-B6-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b6c", **kwargs) def efficientnet_b7c(in_size=(600, 600), **kwargs): """ EfficientNet-B7-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b7c", **kwargs) def efficientnet_b8c(in_size=(672, 672), **kwargs): """ EfficientNet-B8-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (672, 672) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet(version="b8", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b8c", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4, efficientnet_b5, efficientnet_b6, efficientnet_b7, efficientnet_b8, efficientnet_b0b, efficientnet_b1b, efficientnet_b2b, efficientnet_b3b, efficientnet_b4b, efficientnet_b5b, efficientnet_b6b, efficientnet_b7b, efficientnet_b0c, efficientnet_b1c, efficientnet_b2c, efficientnet_b3c, efficientnet_b4c, efficientnet_b5c, efficientnet_b6c, efficientnet_b7c, efficientnet_b8c, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != efficientnet_b0 or weight_count == 5288548) assert (model != efficientnet_b1 or weight_count == 7794184) assert (model != efficientnet_b2 or weight_count == 9109994) assert (model != efficientnet_b3 or weight_count == 12233232) assert (model != efficientnet_b4 or weight_count == 19341616) assert (model != efficientnet_b5 or weight_count == 30389784) assert (model != efficientnet_b6 or weight_count == 43040704) assert (model != efficientnet_b7 or weight_count == 66347960) assert 
(model != efficientnet_b8 or weight_count == 87413142) assert (model != efficientnet_b0b or weight_count == 5288548) assert (model != efficientnet_b1b or weight_count == 7794184) assert (model != efficientnet_b2b or weight_count == 9109994) assert (model != efficientnet_b3b or weight_count == 12233232) assert (model != efficientnet_b4b or weight_count == 19341616) assert (model != efficientnet_b5b or weight_count == 30389784) assert (model != efficientnet_b6b or weight_count == 43040704) assert (model != efficientnet_b7b or weight_count == 66347960) x = np.zeros((1, 3, net.in_size[0], net.in_size[1]), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
38,516
36.761765
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/channelnet.py
""" ChannelNet for ImageNet-1K, implemented in Chainer. Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. """ __all__ = ['ChannelNet', 'channelnet'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential, ReLU6 def dwconv3x3(in_channels, out_channels, stride, use_bias=False): """ 3x3 depthwise version of the standard convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. """ return L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=1, nobias=(not use_bias), groups=out_channels) class ChannetConv(Chain): """ ChannelNet specific convolution block with Batch normalization and ReLU6 activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, dropout_rate=0.0, activate=True): super(ChannetConv, self).__init__() self.use_dropout = (dropout_rate > 0.0) self.activate = activate with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) if self.activate: self.activ = ReLU6() def __call__(self, x): x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def channet_conv1x1(in_channels, out_channels, stride=1, groups=1, use_bias=False, dropout_rate=0.0, activate=True): """ 1x1 version of ChannelNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. """ return ChannetConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, groups=groups, use_bias=use_bias, dropout_rate=dropout_rate, activate=activate) def channet_conv3x3(in_channels, out_channels, stride, pad=1, dilate=1, groups=1, use_bias=False, dropout_rate=0.0, activate=True): """ 3x3 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. 
dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. """ return ChannetConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, dropout_rate=dropout_rate, activate=activate) class ChannetDwsConvBlock(Chain): """ ChannelNet specific depthwise separable convolution block with BatchNorms and activations at last convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. groups : int, default 1 Number of groups. dropout_rate : float, default 0.0 Dropout rate. """ def __init__(self, in_channels, out_channels, stride, groups=1, dropout_rate=0.0): super(ChannetDwsConvBlock, self).__init__() with self.init_scope(): self.dw_conv = dwconv3x3( in_channels=in_channels, out_channels=in_channels, stride=stride) self.pw_conv = channet_conv1x1( in_channels=in_channels, out_channels=out_channels, groups=groups, dropout_rate=dropout_rate) def __call__(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class SimpleGroupBlock(Chain): """ ChannelNet specific block with a sequence of depthwise separable group convolution layers. Parameters: ---------- channels : int Number of input/output channels. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. 
""" def __init__(self, channels, multi_blocks, groups, dropout_rate): super(SimpleGroupBlock, self).__init__() with self.init_scope(): self.blocks = SimpleSequential() with self.blocks.init_scope(): for i in range(multi_blocks): setattr(self.blocks, "block{}".format(i + 1), ChannetDwsConvBlock( in_channels=channels, out_channels=channels, stride=1, groups=groups, dropout_rate=dropout_rate)) def __call__(self, x): x = self.blocks(x) return x class ChannelwiseConv2d(Chain): """ ChannelNet specific block with channel-wise convolution. Parameters: ---------- groups : int Number of groups. dropout_rate : float Dropout rate. """ def __init__(self, groups, dropout_rate): super(ChannelwiseConv2d, self).__init__() self.use_dropout = (dropout_rate > 0.0) with self.init_scope(): self.conv = L.Convolution3D( in_channels=1, out_channels=groups, ksize=(4 * groups, 1, 1), stride=(groups, 1, 1), pad=(2 * groups - 1, 0, 0), nobias=True) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): batch, channels, height, width = x.shape x = F.expand_dims(x, axis=1) x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = F.reshape(x, shape=(batch, channels, height, width)) return x class ConvGroupBlock(Chain): """ ChannelNet specific block with a combination of channel-wise convolution, depthwise separable group convolutions. Parameters: ---------- channels : int Number of input/output channels. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. 
""" def __init__(self, channels, multi_blocks, groups, dropout_rate): super(ConvGroupBlock, self).__init__() with self.init_scope(): self.conv = ChannelwiseConv2d( groups=groups, dropout_rate=dropout_rate) self.block = SimpleGroupBlock( channels=channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate) def __call__(self, x): x = self.conv(x) x = self.block(x) return x class ChannetUnit(Chain): """ ChannelNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : tuple/list of 2 int Number of output channels for each sub-block. strides : int or tuple/list of 2 int Stride of the convolution. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. block_names : tuple/list of 2 str Sub-block names. merge_type : str Type of sub-block output merging. """ def __init__(self, in_channels, out_channels_list, strides, multi_blocks, groups, dropout_rate, block_names, merge_type): super(ChannetUnit, self).__init__() assert (len(block_names) == 2) assert (merge_type in ["seq", "add", "cat"]) self.merge_type = merge_type with self.init_scope(): self.blocks = SimpleSequential() with self.blocks.init_scope(): for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)): stride_i = (strides if i == 0 else 1) if block_name == "channet_conv3x3": setattr(self.blocks, "block{}".format(i + 1), channet_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride_i, dropout_rate=dropout_rate, activate=False)) elif block_name == "channet_dws_conv_block": setattr(self.blocks, "block{}".format(i + 1), ChannetDwsConvBlock( in_channels=in_channels, out_channels=out_channels, stride=stride_i, dropout_rate=dropout_rate)) elif block_name == "simple_group_block": setattr(self.blocks, "block{}".format(i + 1), SimpleGroupBlock( channels=in_channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate)) elif block_name == 
"conv_group_block": setattr(self.blocks, "block{}".format(i + 1), ConvGroupBlock( channels=in_channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate)) else: raise NotImplementedError() in_channels = out_channels def __call__(self, x): x_outs = [] for block_name in self.blocks.layer_names: x = self.blocks[block_name](x) x_outs.append(x) if self.merge_type == "add": for i in range(len(x_outs) - 1): x = x + x_outs[i] elif self.merge_type == "cat": x = F.concat(tuple(x_outs), axis=1) return x class ChannelNet(Chain): """ ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. Parameters: ---------- channels : list of list of list of int Number of output channels for each unit. block_names : list of list of list of str Names of blocks for each unit. block_names : list of list of str Merge types for each unit. dropout_rate : float, default 0.0001 Dropout rate. multi_blocks : int, default 2 Block count architectural parameter. groups : int, default 2 Group count architectural parameter. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, block_names, merge_types, dropout_rate=0.0001, multi_blocks=2, groups=2, in_channels=3, in_size=(224, 224), classes=1000): super(ChannelNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) else 1 setattr(stage, "unit{}".format(j + 1), ChannetUnit( in_channels=in_channels, out_channels_list=out_channels, strides=strides, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate, block_names=block_names[i][j], merge_type=merge_types[i][j])) if merge_types[i][j] == "cat": in_channels = sum(out_channels) else: in_channels = out_channels[-1] setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_channelnet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ChannelNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]] block_names = [[["channet_conv3x3", "channet_dws_conv_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]], [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]]] merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]] net = ChannelNet( channels=channels, block_names=block_names, merge_types=merge_types, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def channelnet(**kwargs): """ ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_channelnet(model_name="channelnet", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ channelnet, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != channelnet or weight_count == 3875112) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
19,610
31.14918
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/pnasnet.py
""" PNASNet for ImageNet-1K, implemented in Chainer. Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559. """ __all__ = ['PNASNet', 'pnasnet5large'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, SimpleSequential from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock,\ process_with_padding class PnasMaxPoolBlock(Chain): """ PNASNet specific Max pooling layer with extra padding. Parameters: ---------- stride : int or tuple/list of 2 int, default 2 Stride of the convolution. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, stride=2, extra_padding=False): super(PnasMaxPoolBlock, self).__init__() self.extra_padding = extra_padding with self.init_scope(): self.pool = partial( F.max_pooling_2d, ksize=3, stride=stride, pad=1, cover_all=False) def __call__(self, x): if self.extra_padding: x = process_with_padding(x, self.pool) else: x = self.pool(x) return x def pnas_conv1x1(in_channels, out_channels, stride=1): """ 1x1 version of the PNASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. """ return NasConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, groups=1) class DwsBranch(Chain): """ PNASNet specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. extra_padding : bool, default False Whether to use extra padding. 
stem : bool, default False Whether to use squeeze reduction if False. """ def __init__(self, in_channels, out_channels, ksize, stride, extra_padding=False, stem=False): super(DwsBranch, self).__init__() assert (not stem) or (not extra_padding) mid_channels = out_channels if stem else in_channels pad = ksize // 2 with self.init_scope(): self.conv1 = NasDwsConv( in_channels=in_channels, out_channels=mid_channels, ksize=ksize, stride=stride, pad=pad, extra_padding=extra_padding) self.conv2 = NasDwsConv( in_channels=mid_channels, out_channels=out_channels, ksize=ksize, stride=1, pad=pad) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x def dws_branch_k3(in_channels, out_channels, stride=2, extra_padding=False, stem=False): """ 3x3 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Stride of the convolution. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, extra_padding=extra_padding, stem=stem) def dws_branch_k5(in_channels, out_channels, stride=2, extra_padding=False, stem=False): """ 5x5 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Stride of the convolution. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. 
""" return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=stride, extra_padding=extra_padding, stem=stem) def dws_branch_k7(in_channels, out_channels, stride=2, extra_padding=False): """ 7x7 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Stride of the convolution. extra_padding : bool, default False Whether to use extra padding. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=stride, extra_padding=extra_padding, stem=False) class PnasMaxPathBlock(Chain): """ PNASNet specific `max path` auxiliary block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(PnasMaxPathBlock, self).__init__() with self.init_scope(): self.maxpool = PnasMaxPoolBlock() self.conv = conv1x1( in_channels=in_channels, out_channels=out_channels) self.bn = nasnet_batch_norm(channels=out_channels) def __call__(self, x): x = self.maxpool(x) x = self.conv(x) x = self.bn(x) return x class PnasBaseUnit(Chain): """ PNASNet base unit. """ def __init__(self): super(PnasBaseUnit, self).__init__() def cell_forward(self, x, x_prev): assert (hasattr(self, 'comb0_left')) x_left = x_prev x_right = x x0 = self.comb0_left(x_left) + self.comb0_right(x_left) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_right) + self.comb2_right(x_right) x3 = self.comb3_left(x2) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + (self.comb4_right(x_right) if self.comb4_right else x_right) x_out = F.concat((x0, x1, x2, x3, x4), axis=1) return x_out class Stem1Unit(PnasBaseUnit): """ PNASNet Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(Stem1Unit, self).__init__() mid_channels = out_channels // 5 with self.init_scope(): self.conv_1x1 = pnas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb0_right = PnasMaxPathBlock( in_channels=in_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k7( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = PnasMaxPoolBlock() self.comb2_left = dws_branch_k5( in_channels=mid_channels, out_channels=mid_channels) self.comb2_right = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels) self.comb3_left = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels, stride=1) self.comb3_right = PnasMaxPoolBlock() self.comb4_left = dws_branch_k3( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb4_right = pnas_conv1x1( in_channels=mid_channels, out_channels=mid_channels, stride=2) def __call__(self, x): x_prev = x x = self.conv_1x1(x) x_out = self.cell_forward(x, x_prev) return x_out class PnasUnit(PnasBaseUnit): """ PNASNet ordinary unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. reduction : bool, default False Whether to use reduction. extra_padding : bool, default False Whether to use extra padding. match_prev_layer_dimensions : bool, default False Whether to match previous layer dimensions. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, reduction=False, extra_padding=False, match_prev_layer_dimensions=False): super(PnasUnit, self).__init__() mid_channels = out_channels // 5 stride = 2 if reduction else 1 with self.init_scope(): if match_prev_layer_dimensions: self.conv_prev_1x1 = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels) else: self.conv_prev_1x1 = pnas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.conv_1x1 = pnas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5( in_channels=mid_channels, out_channels=mid_channels, stride=stride, extra_padding=extra_padding) self.comb0_right = PnasMaxPoolBlock( stride=stride, extra_padding=extra_padding) self.comb1_left = dws_branch_k7( in_channels=mid_channels, out_channels=mid_channels, stride=stride, extra_padding=extra_padding) self.comb1_right = PnasMaxPoolBlock( stride=stride, extra_padding=extra_padding) self.comb2_left = dws_branch_k5( in_channels=mid_channels, out_channels=mid_channels, stride=stride, extra_padding=extra_padding) self.comb2_right = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, extra_padding=extra_padding) self.comb3_left = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels, stride=1) self.comb3_right = PnasMaxPoolBlock( stride=stride, extra_padding=extra_padding) self.comb4_left = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, extra_padding=extra_padding) if reduction: self.comb4_right = pnas_conv1x1( in_channels=mid_channels, out_channels=mid_channels, stride=stride) else: self.comb4_right = None def __call__(self, x, x_prev): x_prev = self.conv_prev_1x1(x_prev) x = self.conv_1x1(x) x_out = self.cell_forward(x, x_prev) return x_out class PNASNet(Chain): """ PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. stem1_blocks_channels : list of 2 int Number of output channels for the Stem1 unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (331, 331) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, stem1_blocks_channels, in_channels=3, in_size=(331, 331), classes=1000): super(PNASNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=2, last_ordinals=2) with self.features.init_scope(): setattr(self.features, "init_block", NASNetInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels setattr(self.features, "stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=stem1_blocks_channels)) prev_in_channels = in_channels in_channels = stem1_blocks_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): reduction = (j == 0) extra_padding = (j == 0) and (i not in [0, 2]) match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0)) setattr(stage, "unit{}".format(j + 1), PnasUnit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, reduction=reduction, extra_padding=extra_padding, match_prev_layer_dimensions=match_prev_layer_dimensions)) prev_in_channels = in_channels in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_activ", F.relu) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=11, stride=1)) self.output = SimpleSequential() with 
self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=0.5)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_pnasnet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PNASNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ repeat = 4 init_block_channels = 96 stem_blocks_channels = [270, 540] norm_channels = [1080, 2160, 4320] channels = [[ci] * repeat for ci in norm_channels] stem1_blocks_channels = stem_blocks_channels[0] channels[0] = [stem_blocks_channels[1]] + channels[0] net = PNASNet( channels=channels, init_block_channels=init_block_channels, stem1_blocks_channels=stem1_blocks_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def pnasnet5large(**kwargs): """ PNASNet-5-Large model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pnasnet(model_name="pnasnet5large", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ pnasnet5large, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != pnasnet5large or weight_count == 86057668) x = np.zeros((1, 3, 331, 331), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
18,807
30.139073
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/efficientnetedge.py
""" EfficientNet-Edge for ImageNet-1K, implemented in Chainer. Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. """ __all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock, SimpleSequential, GlobalAvgPool2D from .efficientnet import EffiInvResUnit, EffiInitBlock class EffiEdgeResUnit(Chain): """ EfficientNet-Edge edge residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. exp_factor : int Factor for expansion of channels. se_factor : int SE reduction factor for each unit. mid_from_in : bool Whether to use input channel count for middle channel count calculation. use_skip : bool Whether to use skip connection. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. 
""" def __init__(self, in_channels, out_channels, stride, exp_factor, se_factor, mid_from_in, use_skip, bn_eps, activation): super(EffiEdgeResUnit, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) and use_skip self.use_se = se_factor > 0 mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), mid_activation=activation) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, activation=None) def __call__(self, x): if self.residual: identity = x x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class EfficientNetEdge(Chain): """ EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. ksizes : list of list of int Number of kernel sizes for each unit. strides_per_stage : list int Stride value for the first unit of each stage. expansion_factors : list of list of int Number of expansion factors for each unit. dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. 
classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, ksizes, strides_per_stage, expansion_factors, dropout_rate=0.2, tf_mode=False, bn_eps=1e-5, in_channels=3, in_size=(224, 224), classes=1000): super(EfficientNetEdge, self).__init__() self.in_size = in_size self.classes = classes activation = "relu" with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", EffiInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): ksizes_per_stage = ksizes[i] expansion_factors_per_stage = expansion_factors[i] mid_from_in = (i != 0) use_skip = (i != 0) stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): ksize = ksizes_per_stage[j] expansion_factor = expansion_factors_per_stage[j] stride = strides_per_stage[i] if (j == 0) else 1 if i < 3: setattr(stage, "unit{}".format(j + 1), EffiEdgeResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, exp_factor=expansion_factor, se_factor=0, mid_from_in=mid_from_in, use_skip=use_skip, bn_eps=bn_eps, activation=activation)) else: setattr(stage, "unit{}".format(j + 1), EffiInvResUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=ksize, stride=stride, exp_factor=expansion_factor, se_factor=0, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, activation=activation)) in_channels = final_block_channels setattr(self.features, "final_pool", GlobalAvgPool2D()) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, 
"flatten", partial( F.reshape, shape=(-1, in_channels))) if dropout_rate > 0.0: setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_efficientnet_edge(version, in_size, tf_mode=False, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create EfficientNet-Edge model with specific parameters. Parameters: ---------- version : str Version of EfficientNet ('small', 'medium', 'large'). in_size : tuple of two ints Spatial size of the expected input image. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" dropout_rate = 0.0 if version == "small": assert (in_size == (224, 224)) depth_factor = 1.0 width_factor = 1.0 # dropout_rate = 0.2 elif version == "medium": assert (in_size == (240, 240)) depth_factor = 1.1 width_factor = 1.0 # dropout_rate = 0.2 elif version == "large": assert (in_size == (300, 300)) depth_factor = 1.4 width_factor = 1.2 # dropout_rate = 0.3 else: raise ValueError("Unsupported EfficientNet-Edge version {}".format(version)) init_block_channels = 32 layers = [1, 2, 4, 5, 4, 2] downsample = [1, 1, 1, 1, 0, 1] channels_per_layers = [24, 32, 48, 96, 144, 192] expansion_factors_per_layers = [4, 8, 8, 8, 8, 8] kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5] strides_per_stage = [1, 2, 2, 2, 1, 2] final_block_channels = 1280 layers = [int(math.ceil(li * depth_factor)) for li in layers] channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), []) kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(kernel_sizes_per_layers, layers, downsample), []) expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(expansion_factors_per_layers, layers, downsample), []) strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(strides_per_stage, layers, downsample), []) strides_per_stage = [si[0] for si in strides_per_stage] init_block_channels = round_channels(init_block_channels * width_factor) if width_factor > 1.0: assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor)) final_block_channels = round_channels(final_block_channels * width_factor) net = EfficientNetEdge( channels=channels, init_block_channels=init_block_channels, 
final_block_channels=final_block_channels, ksizes=kernel_sizes, strides_per_stage=strides_per_stage, expansion_factors=expansion_factors, dropout_rate=dropout_rate, tf_mode=tf_mode, bn_eps=bn_eps, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def efficientnet_edge_small_b(in_size=(224, 224), **kwargs): """ EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet_edge(version="small", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_small_b", **kwargs) def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs): """ EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_efficientnet_edge(version="medium", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_medium_b", **kwargs) def efficientnet_edge_large_b(in_size=(300, 300), **kwargs): """ EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_efficientnet_edge(version="large", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_large_b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ efficientnet_edge_small_b, efficientnet_edge_medium_b, efficientnet_edge_large_b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != efficientnet_edge_small_b or weight_count == 5438392) assert (model != efficientnet_edge_medium_b or weight_count == 6899496) assert (model != efficientnet_edge_large_b or weight_count == 10589712) x = np.zeros((1, 3, net.in_size[0], net.in_size[1]), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
15,366
37.610553
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/squeezenext.py
""" SqueezeNext for ImageNet-1K, implemented in Chainer. Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. """ __all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import ConvBlock, conv1x1_block, conv7x7_block, SimpleSequential class SqnxtUnit(Chain): """ SqueezeNext unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(SqnxtUnit, self).__init__() if stride == 2: reduction_den = 1 self.resize_identity = True elif in_channels > out_channels: reduction_den = 4 self.resize_identity = True else: reduction_den = 2 self.resize_identity = False with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=(in_channels // reduction_den), stride=stride, use_bias=True) self.conv2 = conv1x1_block( in_channels=(in_channels // reduction_den), out_channels=(in_channels // (2 * reduction_den)), use_bias=True) self.conv3 = ConvBlock( in_channels=(in_channels // (2 * reduction_den)), out_channels=(in_channels // reduction_den), ksize=(1, 3), stride=1, pad=(0, 1), use_bias=True) self.conv4 = ConvBlock( in_channels=(in_channels // reduction_den), out_channels=(in_channels // reduction_den), ksize=(3, 1), stride=1, pad=(1, 0), use_bias=True) self.conv5 = conv1x1_block( in_channels=(in_channels // reduction_den), out_channels=out_channels, use_bias=True) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=True) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = 
self.identity_conv(x) else: identity = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.conv5(x) x = x + identity x = self.activ(x) return x class SqnxtInitBlock(Chain): """ SqueezeNext specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(SqnxtInitBlock, self).__init__() with self.init_scope(): self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2, pad=1, use_bias=True) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class SqueezeNext(Chain): """ SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): super(SqueezeNext, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", SqnxtInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SqnxtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, use_bias=True)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_squeezenext(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SqueezeNext model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('23' or '23v5'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" init_block_channels = 64 final_block_channels = 128 channels_per_layers = [32, 64, 128, 256] if version == '23': layers = [6, 6, 8, 1] elif version == '23v5': layers = [2, 4, 14, 1] else: raise ValueError("Unsupported SqueezeNet version {}".format(version)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) final_block_channels = int(final_block_channels * width_scale) net = SqueezeNext( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sqnxt23_w1(**kwargs): """ 1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs) def sqnxt23_w3d2(**kwargs): """ 1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs) def sqnxt23_w2(**kwargs): """ 2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs) def sqnxt23v5_w1(**kwargs): """ 1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs) def sqnxt23v5_w3d2(**kwargs): """ 1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs) def sqnxt23v5_w2(**kwargs): """ 2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ sqnxt23_w1, sqnxt23_w3d2, sqnxt23_w2, sqnxt23v5_w1, sqnxt23v5_w3d2, sqnxt23v5_w2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sqnxt23_w1 or weight_count == 724056) assert (model != sqnxt23_w3d2 or weight_count == 1511824) assert (model != sqnxt23_w2 or weight_count == 2583752) assert (model != sqnxt23v5_w1 or weight_count == 921816) assert (model != sqnxt23v5_w3d2 or weight_count == 1953616) assert (model != sqnxt23v5_w2 or weight_count == 3366344) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,675
32.010417
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/xdensenet.py
""" X-DenseNet for ImageNet-1K, implemented in Chainer. Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. """ __all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block', 'XDenseUnit'] import os import chainer import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential from .preresnet import PreResInitBlock, PreResActivation from .densenet import TransitionBlock class XMaskInit(chainer.initializer.Initializer): """ Returns an initializer performing "X-Net" initialization for masks. Parameters: ---------- expand_ratio : int Ratio of expansion. """ def __init__(self, expand_ratio, **kwargs): super(XMaskInit, self).__init__(**kwargs) assert (expand_ratio > 0) self.expand_ratio = expand_ratio def __call__(self, array): if self.dtype is not None: assert array.dtype == self.dtype xp = chainer.backend.get_array_module(array) shape = array.shape expand_size = max(shape[1] // self.expand_ratio, 1) array[:] = 0 for i in range(shape[0]): jj = xp.random.permutation(shape[1])[:expand_size] array[i, jj, :, :] = 1 class XConvolution2D(L.Convolution2D): """ X-Convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. groups : int, default 1 Number of groups. expand_ratio : int, default 2 Ratio of expansion. 
""" def __init__(self, in_channels, out_channels, ksize, groups=1, expand_ratio=2, **kwargs): super(XConvolution2D, self).__init__( in_channels=in_channels, out_channels=out_channels, ksize=ksize, groups=groups, **kwargs) if isinstance(ksize, int): ksize = (ksize, ksize) grouped_in_channels = in_channels // groups self.mask = chainer.initializers.generate_array( initializer=XMaskInit(expand_ratio=expand_ratio), shape=(out_channels, grouped_in_channels, ksize[0], ksize[1]), xp=self.xp) self.register_persistent("mask") def forward(self, x): if self.W.array is None: self._initialize_params(x.shape[1]) masked_weight = self.W * self.mask # print("self.W.sum()={}".format(self.W.array.sum())) # print("self.mask.sum()={}".format(self.mask.sum())) # print("masked_weight.sum()={}".format(masked_weight.array.sum())) return F.convolution_2d( x=x, W=masked_weight, b=self.b, stride=self.stride, pad=self.pad, dilate=self.dilate, groups=self.groups) class PreXConvBlock(Chain): """ X-Convolution block with Batch normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. expand_ratio : int, default 2 Ratio of expansion. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, return_preact=False, activate=True, expand_ratio=2): super(PreXConvBlock, self).__init__() self.return_preact = return_preact self.activate = activate with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) if self.activate: self.activ = F.relu self.conv = XConvolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, expand_ratio=expand_ratio) def __call__(self, x): x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_xconv1x1_block(in_channels, out_channels, stride=1, use_bias=False, return_preact=False, activate=True, expand_ratio=2): """ 1x1 version of the pre-activated x-convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. expand_ratio : int, default 2 Ratio of expansion. """ return PreXConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, use_bias=use_bias, return_preact=return_preact, activate=activate, expand_ratio=expand_ratio) def pre_xconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, return_preact=False, activate=True, expand_ratio=2): """ 3x3 version of the pre-activated x-convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. 
pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. expand_ratio : int, default 2 Ratio of expansion. """ return PreXConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, return_preact=return_preact, activate=activate, expand_ratio=expand_ratio) class XDenseUnit(Chain): """ X-DenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. expand_ratio : int Ratio of expansion. """ def __init__(self, in_channels, out_channels, dropout_rate, expand_ratio): super(XDenseUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bn_size with self.init_scope(): self.conv1 = pre_xconv1x1_block( in_channels=in_channels, out_channels=mid_channels, expand_ratio=expand_ratio) self.conv2 = pre_xconv3x3_block( in_channels=mid_channels, out_channels=inc_channels, expand_ratio=expand_ratio) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): identity = x x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) x = F.concat((identity, x), axis=1) return x class XDenseNet(Chain): """ X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. 
expand_ratio : int, default 2 Ratio of expansion. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, dropout_rate=0.0, expand_ratio=2, in_channels=3, in_size=(224, 224), classes=1000): super(XDenseNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), XDenseUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, expand_ratio=expand_ratio)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_xdensenet(blocks, expand_ratio=2, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create X-DenseNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. 
expand_ratio : int, default 2 Ratio of expansion. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif blocks == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif blocks == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif blocks == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = XDenseNet( channels=channels, init_block_channels=init_block_channels, expand_ratio=expand_ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def xdensenet121_2(**kwargs): """ X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=121, model_name="xdensenet121_2", **kwargs) def xdensenet161_2(**kwargs): """ X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=161, model_name="xdensenet161_2", **kwargs) def xdensenet169_2(**kwargs): """ X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=169, model_name="xdensenet169_2", **kwargs) def xdensenet201_2(**kwargs): """ X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=201, model_name="xdensenet201_2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ xdensenet121_2, xdensenet161_2, xdensenet169_2, xdensenet201_2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != xdensenet121_2 or weight_count == 7978856) assert (model != xdensenet161_2 or weight_count == 28681000) assert (model != xdensenet169_2 or weight_count == 14149480) assert (model != xdensenet201_2 or weight_count == 20013928) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
17,412
30.717668
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/diaresnet_cifar.py
""" DIA-ResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['CIFARDIAResNet', 'diaresnet20_cifar10', 'diaresnet20_cifar100', 'diaresnet20_svhn', 'diaresnet56_cifar10', 'diaresnet56_cifar100', 'diaresnet56_svhn', 'diaresnet110_cifar10', 'diaresnet110_cifar100', 'diaresnet110_svhn', 'diaresnet164bn_cifar10', 'diaresnet164bn_cifar100', 'diaresnet164bn_svhn', 'diaresnet1001_cifar10', 'diaresnet1001_cifar100', 'diaresnet1001_svhn', 'diaresnet1202_cifar10', 'diaresnet1202_cifar100', 'diaresnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, DualPathSequential, SimpleSequential from .diaresnet import DIAAttention, DIAResUnit class CIFARDIAResNet(Chain): """ DIA-ResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARDIAResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DIAResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False, attention=attention, hold_attention=(j == 0))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_diaresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DIA-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARDIAResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def diaresnet20_cifar10(classes=10, **kwargs): """ DIA-ResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar10", **kwargs) def diaresnet20_cifar100(classes=100, **kwargs): """ DIA-ResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar100", **kwargs) def diaresnet20_svhn(classes=10, **kwargs): """ DIA-ResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_svhn", **kwargs) def diaresnet56_cifar10(classes=10, **kwargs): """ DIA-ResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar10", **kwargs) def diaresnet56_cifar100(classes=100, **kwargs): """ DIA-ResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar100", **kwargs) def diaresnet56_svhn(classes=10, **kwargs): """ DIA-ResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_svhn", **kwargs) def diaresnet110_cifar10(classes=10, **kwargs): """ DIA-ResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar10", **kwargs) def diaresnet110_cifar100(classes=100, **kwargs): """ DIA-ResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar100", **kwargs) def diaresnet110_svhn(classes=10, **kwargs): """ DIA-ResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_svhn", **kwargs) def diaresnet164bn_cifar10(classes=10, **kwargs): """ DIA-ResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar10", **kwargs) def diaresnet164bn_cifar100(classes=100, **kwargs): """ DIA-ResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar100", **kwargs) def diaresnet164bn_svhn(classes=10, **kwargs): """ DIA-ResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_svhn", **kwargs) def diaresnet1001_cifar10(classes=10, **kwargs): """ DIA-ResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar10", **kwargs) def diaresnet1001_cifar100(classes=100, **kwargs): """ DIA-ResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar100", **kwargs) def diaresnet1001_svhn(classes=10, **kwargs): """ DIA-ResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_svhn", **kwargs) def diaresnet1202_cifar10(classes=10, **kwargs): """ DIA-ResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar10", **kwargs) def diaresnet1202_cifar100(classes=100, **kwargs): """ DIA-ResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar100", **kwargs) def diaresnet1202_svhn(classes=10, **kwargs): """ DIA-ResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (diaresnet20_cifar10, 10), (diaresnet20_cifar100, 100), (diaresnet20_svhn, 10), (diaresnet56_cifar10, 10), (diaresnet56_cifar100, 100), (diaresnet56_svhn, 10), (diaresnet110_cifar10, 10), (diaresnet110_cifar100, 100), (diaresnet110_svhn, 10), (diaresnet164bn_cifar10, 10), (diaresnet164bn_cifar100, 100), (diaresnet164bn_svhn, 10), (diaresnet1001_cifar10, 10), (diaresnet1001_cifar100, 100), (diaresnet1001_svhn, 10), (diaresnet1202_cifar10, 10), (diaresnet1202_cifar100, 100), (diaresnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != diaresnet20_cifar10 or weight_count == 286866) assert (model != diaresnet20_cifar100 or weight_count == 292716) assert (model != diaresnet20_svhn or weight_count == 286866) assert (model != diaresnet56_cifar10 or weight_count == 870162) assert (model != diaresnet56_cifar100 or weight_count == 876012) assert (model != diaresnet56_svhn or weight_count == 870162) assert (model != diaresnet110_cifar10 or weight_count == 1745106) assert (model != diaresnet110_cifar100 or weight_count == 1750956) assert (model != diaresnet110_svhn or weight_count == 1745106) assert (model != diaresnet164bn_cifar10 or weight_count == 1923002) assert (model != diaresnet164bn_cifar100 or weight_count == 1946132) assert (model != diaresnet164bn_svhn or weight_count == 1923002) assert (model != diaresnet1001_cifar10 or weight_count == 10547450) assert (model != diaresnet1001_cifar100 or weight_count == 10570580) assert (model != diaresnet1001_svhn or weight_count == 10547450) assert (model != diaresnet1202_cifar10 or weight_count == 19438418) assert (model != diaresnet1202_cifar100 or weight_count 
== 19444268) assert (model != diaresnet1202_svhn or weight_count == 19438418) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
19,930
35.841035
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resdropresnet_cifar.py
""" ResDrop-ResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. """ __all__ = ['CIFARResDropResNet', 'resdropresnet20_cifar10', 'resdropresnet20_cifar100', 'resdropresnet20_svhn'] import os from chainer import backend from chainer import config import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SimpleSequential from .resnet import ResBlock, ResBottleneck class ResDropResUnit(Chain): """ ResDrop-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. life_prob : float Residual branch life probability. """ def __init__(self, in_channels, out_channels, stride, bottleneck, life_prob): super(ResDropResUnit, self).__init__() self.life_prob = life_prob self.resize_identity = (in_channels != out_channels) or (stride != 1) body_class = ResBottleneck if bottleneck else ResBlock with self.init_scope(): self.body = body_class( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if config.train: xp = backend.get_array_module(x) b = xp.random.binomial(n=1, p=self.life_prob) x = float(b) / self.life_prob * x x = x + identity x = self.activ(x) return x class CIFARResDropResNet(Chain): """ ResDrop-ResNet model for CIFAR from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. life_probs : list of float Residual branch life probability for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, life_probs, in_channels=3, in_size=(32, 32), classes=10): super(CIFARResDropResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels k = 0 for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResDropResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, life_prob=life_probs[k])) in_channels = out_channels k += 1 setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resdropresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResDrop-ResNet model for 
CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 channels_per_layers = [16, 32, 64] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] total_layers = sum(layers) final_death_prob = 0.5 life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)] net = CIFARResDropResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, life_probs=life_probs, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resdropresnet20_cifar10(classes=10, **kwargs): """ ResDrop-ResNet-20 model for CIFAR-10 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar10", **kwargs) def resdropresnet20_cifar100(classes=100, **kwargs): """ ResDrop-ResNet-20 model for CIFAR-100 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar100", **kwargs) def resdropresnet20_svhn(classes=10, **kwargs): """ ResDrop-ResNet-20 model for SVHN from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (resdropresnet20_cifar10, 10), (resdropresnet20_cifar100, 100), (resdropresnet20_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resdropresnet20_cifar10 or weight_count == 272474) assert (model != resdropresnet20_cifar100 or weight_count == 278324) assert (model != resdropresnet20_svhn or weight_count == 272474) x = np.zeros((14, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (14, classes)) if __name__ == "__main__": _test()
10,196
33.103679
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/bisenet.py
""" BiSeNet for CelebAMask-HQ, implemented in Chainer. Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897. """ __all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq'] import os import chainer.functions as F from chainer import Chain from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential from .resnet import resnet18 class PyramidPoolingZeroBranch(Chain): """ Pyramid pooling zero branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of 2 int Spatial size of output image for the upsampling operation. """ def __init__(self, in_channels, out_channels, in_size): super(PyramidPoolingZeroBranch, self).__init__() self.in_size = in_size with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.up = InterpolationBlock( scale_factor=None, mode="nearest") def __call__(self, x): in_size = self.in_size if self.in_size is not None else x.shape[2:] x = F.average_pooling_2d(x, ksize=x.shape[2:]) x = self.conv(x) x = self.up(x, size=in_size) return x class AttentionRefinementBlock(Chain): """ Attention refinement block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(AttentionRefinementBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels) self.conv2 = conv1x1_block( in_channels=out_channels, out_channels=out_channels, activation=(lambda: F.sigmoid)) def __call__(self, x): x = self.conv1(x) w = F.average_pooling_2d(x, ksize=x.shape[2:]) w = self.conv2(w) x = x * w return x class PyramidPoolingMainBranch(Chain): """ Pyramid pooling main branch. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : float Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, scale_factor): super(PyramidPoolingMainBranch, self).__init__() with self.init_scope(): self.att = AttentionRefinementBlock( in_channels=in_channels, out_channels=out_channels) self.up = InterpolationBlock( scale_factor=scale_factor, mode="nearest") self.conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels) def __call__(self, x, y): x = self.att(x) x = x + y x = self.up(x) x = self.conv(x) return x class FeatureFusion(Chain): """ Feature fusion block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. reduction : int, default 4 Squeeze reduction value. """ def __init__(self, in_channels, out_channels, reduction=4): super(FeatureFusion, self).__init__() mid_channels = out_channels // reduction with self.init_scope(): self.conv_merge = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.conv1 = conv1x1( in_channels=out_channels, out_channels=mid_channels) self.activ = F.relu self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels) self.sigmoid = F.sigmoid def __call__(self, x, y): x = F.concat((x, y), axis=1) x = self.conv_merge(x) w = F.average_pooling_2d(x, ksize=x.shape[2:]) w = self.conv1(w) w = self.activ(w) w = self.conv2(w) w = self.sigmoid(w) x_att = x * w x = x + x_att return x class PyramidPooling(Chain): """ Pyramid Pooling module. Parameters: ---------- x16_in_channels : int Number of input channels for x16. x32_in_channels : int Number of input channels for x32. y_out_channels : int Number of output channels for y-outputs. y32_out_size : tuple of 2 int Spatial size of the y32 tensor. 
""" def __init__(self, x16_in_channels, x32_in_channels, y_out_channels, y32_out_size): super(PyramidPooling, self).__init__() z_out_channels = 2 * y_out_channels with self.init_scope(): self.pool32 = PyramidPoolingZeroBranch( in_channels=x32_in_channels, out_channels=y_out_channels, in_size=y32_out_size) self.pool16 = PyramidPoolingMainBranch( in_channels=x32_in_channels, out_channels=y_out_channels, scale_factor=2) self.pool8 = PyramidPoolingMainBranch( in_channels=x16_in_channels, out_channels=y_out_channels, scale_factor=2) self.fusion = FeatureFusion( in_channels=z_out_channels, out_channels=z_out_channels) def __call__(self, x8, x16, x32): y32 = self.pool32(x32) y16 = self.pool16(x32, y32) y8 = self.pool8(x16, y16) z8 = self.fusion(x8, y8) return z8, y8, y16 class BiSeHead(Chain): """ BiSeNet head (final) block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid_channels, out_channels): super(BiSeHead, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class BiSeNet(Chain): """ BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897. Parameters: ---------- backbone : func -> nn.Sequential Feature extractor. aux : bool, default True Whether to output an auxiliary results. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (640, 480) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, backbone, aux=True, fixed_size=True, in_channels=3, in_size=(640, 480), classes=19): super(BiSeNet, self).__init__() assert (in_channels == 3) self.in_size = in_size self.classes = classes self.aux = aux self.fixed_size = fixed_size with self.init_scope(): self.backbone, backbone_out_channels = backbone() y_out_channels = backbone_out_channels[0] z_out_channels = 2 * y_out_channels y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None self.pool = PyramidPooling( x16_in_channels=backbone_out_channels[1], x32_in_channels=backbone_out_channels[2], y_out_channels=y_out_channels, y32_out_size=y32_out_size) self.head_z8 = BiSeHead( in_channels=z_out_channels, mid_channels=z_out_channels, out_channels=classes) self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None)) if self.aux: mid_channels = y_out_channels // 2 self.head_y8 = BiSeHead( in_channels=y_out_channels, mid_channels=mid_channels, out_channels=classes) self.head_y16 = BiSeHead( in_channels=y_out_channels, mid_channels=mid_channels, out_channels=classes) self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None)) def __call__(self, x): assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0) x8, x16, x32 = self.backbone(x) z8, y8, y16 = self.pool(x8, x16, x32) z8 = self.head_z8(z8) z8 = self.up8(z8) if self.aux: y8 = self.head_y8(y8) y16 = self.head_y16(y16) y8 = self.up8(y8) y16 = self.up16(y16) return z8, y8, y16 else: return z8 def get_bisenet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create BiSeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" net = BiSeNet( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, classes=19, **kwargs): """ BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ def backbone(): features_raw = resnet18(pretrained=pretrained_backbone).features del features_raw.final_pool features = MultiOutputSequential(return_last=False) with features.init_scope(): setattr(features, "init_block", features_raw.el(0)) for i, block_name in enumerate(features_raw.layer_names[1:]): stage = features_raw[block_name] if i != 0: stage.do_output = True setattr(features, "stage{}".format(i + 1), stage) out_channels = [128, 256, 512] return features, out_channels return get_bisenet(backbone=backbone, classes=classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (640, 480) aux = True pretrained = False models = [ bisenet_resnet18_celebamaskhq, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13300416) else: 
assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13150272) batch = 1 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) ys = net(x) y = ys[0] if aux else ys assert (y.shape == (batch, 19, in_size[0], in_size[1])) if __name__ == "__main__": _test()
13,212
29.943794
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnet.py
""" ResNet for ImageNet-1K, implemented in Chainer. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2', 'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b', 'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck', 'ResUnit', 'ResInitBlock'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, conv7x7_block, SimpleSequential class ResBlock(Chain): """ Simple ResNet block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, use_bias=False, use_bn=True): super(ResBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class ResBottleneck(Chain): """ ResNet bottleneck block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer. 
dilate : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, pad=1, dilate=1, conv1_stride=False, bottleneck_factor=4): super(ResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), pad=pad, dilate=dilate) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResUnit(Chain): """ ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilate : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, pad=1, dilate=1, use_bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(ResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=pad, dilate=dilate, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResInitBlock(Chain): """ ResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ResInitBlock, self).__init__() with self.init_scope(): self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class ResNet(Chain): """ ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(ResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( 
file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resnet10(**kwargs): """ ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=10, model_name="resnet10", **kwargs) def resnet12(**kwargs): """ ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=12, model_name="resnet12", **kwargs) def resnet14(**kwargs): """ ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, model_name="resnet14", **kwargs) def resnetbc14b(**kwargs): """ ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs) def resnet16(**kwargs): """ ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=16, model_name="resnet16", **kwargs) def resnet18_wd4(**kwargs): """ ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs) def resnet18_wd2(**kwargs): """ ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs) def resnet18_w3d4(**kwargs): """ ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs) def resnet18(**kwargs): """ ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=18, model_name="resnet18", **kwargs) def resnet26(**kwargs): """ ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs) def resnetbc26b(**kwargs): """ ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs) def resnet34(**kwargs): """ ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="resnet34", **kwargs) def resnetbc38b(**kwargs): """ ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs) def resnet50(**kwargs): """ ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="resnet50", **kwargs) def resnet50b(**kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs) def resnet101(**kwargs): """ ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="resnet101", **kwargs) def resnet101b(**kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs) def resnet152(**kwargs): """ ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=152, model_name="resnet152", **kwargs) def resnet152b(**kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs) def resnet200(**kwargs): """ ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, model_name="resnet200", **kwargs) def resnet200b(**kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ resnet10, resnet12, resnet14, resnetbc14b, resnet16, resnet18_wd4, resnet18_wd2, resnet18_w3d4, resnet18, resnet26, resnetbc26b, resnet34, resnetbc38b, resnet50, resnet50b, resnet101, resnet101b, resnet152, resnet152b, resnet200, resnet200b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10 or weight_count == 5418792) assert (model != resnet12 or weight_count == 5492776) assert (model != resnet14 or weight_count == 5788200) assert (model != resnetbc14b or weight_count == 10064936) assert (model != resnet16 or weight_count == 6968872) assert (model != resnet18_wd4 or weight_count == 3937400) assert (model != resnet18_wd2 or weight_count == 5804296) assert (model != resnet18_w3d4 or weight_count == 8476056) assert (model != resnet18 or weight_count == 11689512) assert (model != resnet26 or weight_count == 17960232) assert (model != resnetbc26b or weight_count == 15995176) assert (model != resnet34 or weight_count == 21797672) assert (model != resnetbc38b or weight_count == 21925416) assert (model != resnet50 or weight_count == 25557032) assert (model != resnet50b or weight_count == 25557032) assert (model != resnet101 or weight_count == 44549160) assert (model != resnet101b or weight_count == 44549160) assert (model != resnet152 or weight_count == 60192808) assert (model != resnet152b or weight_count == 60192808) assert (model != resnet200 or weight_count == 64673832) assert (model != resnet200b or weight_count == 64673832) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
25,928
32.370656
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/simpleposemobile_coco.py
""" SimplePose(Mobile) for COCO Keypoint, implemented in Chainer. Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. """ __all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco', 'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco', 'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco'] import os from chainer import Chain from chainer.serializers import load_npz from .common import conv1x1, DucBlock, HeatmapMaxDetBlock, SimpleSequential from .resnet import resnet18, resnet50b from .mobilenet import mobilenet_w1 from .mobilenetv2 import mobilenetv2b_w1 from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1 class SimplePoseMobile(Chain): """ SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. decoder_init_block_channels : int Number of output channels for the initial unit of the decoder. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. 
""" def __init__(self, backbone, backbone_out_channels, channels, decoder_init_block_channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17, **kwargs): super(SimplePoseMobile, self).__init__(**kwargs) assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap with self.init_scope(): self.backbone = backbone self.decoder = SimpleSequential() with self.decoder.init_scope(): in_channels = backbone_out_channels setattr(self.decoder, "init_block", conv1x1( in_channels=in_channels, out_channels=decoder_init_block_channels)) in_channels = decoder_init_block_channels for i, out_channels in enumerate(channels): setattr(self.decoder, "unit{}".format(i + 1), DucBlock( in_channels=in_channels, out_channels=out_channels, scale_factor=2)) in_channels = out_channels setattr(self.decoder, "final_block", conv1x1( in_channels=in_channels, out_channels=keypoints)) self.heatmap_max_det = HeatmapMaxDetBlock() def __call__(self, x): x = self.backbone(x) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_simpleposemobile(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SimplePose(Mobile) model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [128, 64, 32] decoder_init_block_channels = 256 net = SimplePoseMobile( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, decoder_init_block_channels=decoder_init_block_channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=512, keypoints=keypoints, model_name="simplepose_mobile_resnet18_coco", **kwargs) def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_mobile_resnet50b_coco", **kwargs) def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of 1.0 MobileNet-224 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = mobilenet_w1(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=1024, keypoints=keypoints, model_name="simplepose_mobile_mobilenet_w1_coco", **kwargs) def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of 1.0 MobileNetV2b-224 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = mobilenetv2b_w1(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=1280, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv2b_w1_coco", **kwargs) def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of MobileNetV3 Small 224/1.0 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = mobilenetv3_small_w1(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=576, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv3_small_w1_coco", **kwargs) def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of MobileNetV3 Large 224/1.0 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = mobilenetv3_large_w1(pretrained=pretrained_backbone).features del backbone.final_pool return get_simpleposemobile(backbone=backbone, backbone_out_channels=960, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv3_large_w1_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ simplepose_mobile_resnet18_coco, simplepose_mobile_resnet50b_coco, simplepose_mobile_mobilenet_w1_coco, simplepose_mobile_mobilenetv2b_w1_coco, simplepose_mobile_mobilenetv3_small_w1_coco, simplepose_mobile_mobilenetv3_large_w1_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != simplepose_mobile_resnet18_coco or weight_count == 12858208) assert (model != simplepose_mobile_resnet50b_coco or weight_count == 25582944) assert (model != simplepose_mobile_mobilenet_w1_coco or weight_count == 5019744) assert (model != simplepose_mobile_mobilenetv2b_w1_coco or weight_count == 4102176) assert (model != simplepose_mobile_mobilenetv3_small_w1_coco or weight_count == 2625088) assert (model != simplepose_mobile_mobilenetv3_large_w1_coco or weight_count == 4768336) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
12,638
38.870662
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/cbamresnet.py
""" CBAM-ResNet for ImageNet-1K, implemented in Chainer. Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. """ __all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential, conv1x1_block, conv7x7_block from .resnet import ResInitBlock, ResBlock, ResBottleneck class MLP(Chain): """ Multilayer perceptron block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(MLP, self).__init__() mid_channels = channels // reduction_ratio with self.init_scope(): self.fc1 = L.Linear( in_size=channels, out_size=mid_channels) self.activ = F.relu self.fc2 = L.Linear( in_size=mid_channels, out_size=channels) def __call__(self, x): x = F.reshape(x, shape=(x.shape[0], -1)) x = self.fc1(x) x = self.activ(x) x = self.fc2(x) return x class ChannelGate(Chain): """ CBAM channel gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(ChannelGate, self).__init__() with self.init_scope(): self.mlp = MLP( channels=channels, reduction_ratio=reduction_ratio) def __call__(self, x): att1 = F.average_pooling_2d(x, ksize=x.shape[2:]) att1 = self.mlp(att1) att2 = F.max_pooling_2d(x, ksize=x.shape[2:]) att2 = self.mlp(att2) att = att1 + att2 att = F.sigmoid(att) att = F.broadcast_to(F.expand_dims(F.expand_dims(att, axis=2), axis=3), x.shape) x = x * att return x class SpatialGate(Chain): """ CBAM spatial gate block. 
""" def __init__(self): super(SpatialGate, self).__init__() with self.init_scope(): self.conv = conv7x7_block( in_channels=2, out_channels=1, activation=None) def __call__(self, x): att1 = F.expand_dims(F.max(x, axis=1), axis=1) att2 = F.expand_dims(F.mean(x, axis=1), axis=1) att = F.concat((att1, att2), axis=1) att = self.conv(att) att = F.broadcast_to(F.sigmoid(att), x.shape) x = x * att return x class CbamBlock(Chain): """ CBAM attention block for CBAM-ResNet. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(CbamBlock, self).__init__() with self.init_scope(): self.ch_gate = ChannelGate( channels=channels, reduction_ratio=reduction_ratio) self.sp_gate = SpatialGate() def __call__(self, x): x = self.ch_gate(x) x = self.sp_gate(x) return x class CbamResUnit(Chain): """ CBAM-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck): super(CbamResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=False) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.cbam = CbamBlock(channels=out_channels) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.cbam(x) x = x + identity x = self.activ(x) return x class CbamResNet(Chain): """ CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), classes=1000): super(CbamResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), CbamResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create CBAM-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. use_se : bool Whether to use SE block. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CbamResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def cbam_resnet18(**kwargs): """ CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs) def cbam_resnet34(**kwargs): """ CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs) def cbam_resnet50(**kwargs): """ CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs) def cbam_resnet101(**kwargs): """ CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs) def cbam_resnet152(**kwargs): """ CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ # cbam_resnet18, # cbam_resnet34, cbam_resnet50, # cbam_resnet101, # cbam_resnet152, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != cbam_resnet18 or weight_count == 11779392) assert (model != cbam_resnet34 or weight_count == 21960468) assert (model != cbam_resnet50 or weight_count == 28089624) assert (model != cbam_resnet101 or weight_count == 49330172) assert (model != cbam_resnet152 or weight_count == 66826848) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,242
29.726218
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/diracnetv2.py
""" DiracNetV2 for ImageNet-1K, implemented in Chainer. Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. """ __all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential class DiracConv(Chain): """ DiracNetV2 specific convolution block with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, ksize, stride, pad): super(DiracConv, self).__init__() with self.init_scope(): self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=False) def __call__(self, x): x = self.activ(x) x = self.conv(x) return x def dirac_conv3x3(in_channels, out_channels): """ 3x3 version of the DiracNetV2 specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return DiracConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=1, pad=1) class DiracInitBlock(Chain): """ DiracNetV2 specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(DiracInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=2, pad=3, nobias=False) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class DiracNetV2(Chain): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000): super(DiracNetV2, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", DiracInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), dirac_conv3x3( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels if i != len(channels) - 1: setattr(stage, "pool{}".format(i + 1), partial( F.max_pooling_2d, ksize=2, stride=2, pad=0)) setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_activ", F.relu) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with 
self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_diracnetv2(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DiracNetV2 model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 18: layers = [4, 4, 4, 4] elif blocks == 34: layers = [6, 8, 12, 6] else: raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks)) channels_per_layers = [64, 128, 256, 512] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] init_block_channels = 64 net = DiracNetV2( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def diracnet18v2(**kwargs): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diracnetv2(blocks=18, model_name="diracnet18v2", **kwargs) def diracnet34v2(**kwargs): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diracnetv2(blocks=34, model_name="diracnet34v2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ diracnet18v2, diracnet34v2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != diracnet18v2 or weight_count == 11511784) assert (model != diracnet34v2 or weight_count == 21616232) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
8,791
29.109589
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sepreresnet_cifar.py
""" SE-PreResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn', 'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn', 'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn', 'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn', 'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn', 'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn', 'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn', 'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential from .sepreresnet import SEPreResUnit class CIFARSEPreResNet(Chain): """ SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARSEPreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SEPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_sepreresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SE-PreResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARSEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sepreresnet20_cifar10(classes=10, **kwargs): """ SE-PreResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar10", **kwargs) def sepreresnet20_cifar100(classes=100, **kwargs): """ SE-PreResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar100", **kwargs) def sepreresnet20_svhn(classes=10, **kwargs): """ SE-PreResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_svhn", **kwargs) def sepreresnet56_cifar10(classes=10, **kwargs): """ SE-PreResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar10", **kwargs) def sepreresnet56_cifar100(classes=100, **kwargs): """ SE-PreResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar100", **kwargs) def sepreresnet56_svhn(classes=10, **kwargs): """ SE-PreResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_svhn", **kwargs) def sepreresnet110_cifar10(classes=10, **kwargs): """ SE-PreResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar10", **kwargs) def sepreresnet110_cifar100(classes=100, **kwargs): """ SE-PreResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar100", **kwargs) def sepreresnet110_svhn(classes=10, **kwargs): """ SE-PreResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_svhn", **kwargs) def sepreresnet164bn_cifar10(classes=10, **kwargs): """ SE-PreResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar10", **kwargs) def sepreresnet164bn_cifar100(classes=100, **kwargs): """ SE-PreResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar100", **kwargs) def sepreresnet164bn_svhn(classes=10, **kwargs): """ SE-PreResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_svhn", **kwargs) def sepreresnet272bn_cifar10(classes=10, **kwargs): """ SE-PreResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar10", **kwargs) def sepreresnet272bn_cifar100(classes=100, **kwargs): """ SE-PreResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar100", **kwargs) def sepreresnet272bn_svhn(classes=10, **kwargs): """ SE-PreResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_svhn", **kwargs) def sepreresnet542bn_cifar10(classes=10, **kwargs): """ SE-PreResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar10", **kwargs) def sepreresnet542bn_cifar100(classes=100, **kwargs): """ SE-PreResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar100", **kwargs) def sepreresnet542bn_svhn(classes=10, **kwargs): """ SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_svhn", **kwargs) def sepreresnet1001_cifar10(classes=10, **kwargs): """ SE-PreResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar10", **kwargs) def sepreresnet1001_cifar100(classes=100, **kwargs): """ SE-PreResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar100", **kwargs) def sepreresnet1001_svhn(classes=10, **kwargs): """ SE-PreResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_svhn", **kwargs) def sepreresnet1202_cifar10(classes=10, **kwargs): """ SE-PreResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar10", **kwargs) def sepreresnet1202_cifar100(classes=100, **kwargs): """ SE-PreResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar100", **kwargs) def sepreresnet1202_svhn(classes=10, **kwargs): """ SE-PreResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (sepreresnet20_cifar10, 10), (sepreresnet20_cifar100, 100), (sepreresnet20_svhn, 10), (sepreresnet56_cifar10, 10), (sepreresnet56_cifar100, 100), (sepreresnet56_svhn, 10), (sepreresnet110_cifar10, 10), (sepreresnet110_cifar100, 100), (sepreresnet110_svhn, 10), (sepreresnet164bn_cifar10, 10), (sepreresnet164bn_cifar100, 100), (sepreresnet164bn_svhn, 10), (sepreresnet272bn_cifar10, 10), (sepreresnet272bn_cifar100, 100), (sepreresnet272bn_svhn, 10), (sepreresnet542bn_cifar10, 10), (sepreresnet542bn_cifar100, 100), (sepreresnet542bn_svhn, 10), (sepreresnet1001_cifar10, 10), (sepreresnet1001_cifar100, 100), (sepreresnet1001_svhn, 10), (sepreresnet1202_cifar10, 10), (sepreresnet1202_cifar100, 100), (sepreresnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet20_cifar10 or weight_count == 274559) assert (model != sepreresnet20_cifar100 or weight_count == 280409) assert (model != sepreresnet20_svhn or weight_count == 274559) assert (model != sepreresnet56_cifar10 or weight_count == 862601) assert (model != sepreresnet56_cifar100 or weight_count == 868451) assert 
(model != sepreresnet56_svhn or weight_count == 862601) assert (model != sepreresnet110_cifar10 or weight_count == 1744664) assert (model != sepreresnet110_cifar100 or weight_count == 1750514) assert (model != sepreresnet110_svhn or weight_count == 1744664) assert (model != sepreresnet164bn_cifar10 or weight_count == 1904882) assert (model != sepreresnet164bn_cifar100 or weight_count == 1928012) assert (model != sepreresnet164bn_svhn or weight_count == 1904882) assert (model != sepreresnet272bn_cifar10 or weight_count == 3152450) assert (model != sepreresnet272bn_cifar100 or weight_count == 3175580) assert (model != sepreresnet272bn_svhn or weight_count == 3152450) assert (model != sepreresnet542bn_cifar10 or weight_count == 6271370) assert (model != sepreresnet542bn_cifar100 or weight_count == 6294500) assert (model != sepreresnet542bn_svhn or weight_count == 6271370) assert (model != sepreresnet1001_cifar10 or weight_count == 11573534) assert (model != sepreresnet1001_cifar100 or weight_count == 11596664) assert (model != sepreresnet1001_svhn or weight_count == 11573534) assert (model != sepreresnet1202_cifar10 or weight_count == 19581938) assert (model != sepreresnet1202_cifar100 or weight_count == 19587788) assert (model != sepreresnet1202_svhn or weight_count == 19581938) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
24,348
37.22449
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/danet.py
""" DANet for image segmentation, implemented in Chainer. Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. """ __all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes'] import os import chainer.functions as F from chainer import link from chainer import Chain from functools import partial from chainer.serializers import load_npz from chainer.variable import Parameter from chainer.initializers import _get_initializer from .common import conv1x1, conv3x3_block from .resnetd import resnetd50b, resnetd101b class ScaleBlock(link.Link): """ Simple scale block. Parameters: ---------- initial_alpha : obj, default 0 Initializer for the weights. """ def __init__(self, initial_alpha=0): super(ScaleBlock, self).__init__() with self.init_scope(): alpha_initializer = _get_initializer(initial_alpha) self.alpha = Parameter( initializer=alpha_initializer, shape=(1,), name="alpha") def __call__(self, x): return self.alpha.data * x class PosAttBlock(Chain): """ Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. It captures long-range spatial contextual information. Parameters: ---------- channels : int Number of channels. reduction : int, default 8 Squeeze reduction value. 
""" def __init__(self, channels, reduction=8): super(PosAttBlock, self).__init__() mid_channels = channels // reduction with self.init_scope(): self.query_conv = conv1x1( in_channels=channels, out_channels=mid_channels, use_bias=True) self.key_conv = conv1x1( in_channels=channels, out_channels=mid_channels, use_bias=True) self.value_conv = conv1x1( in_channels=channels, out_channels=channels, use_bias=True) self.scale = ScaleBlock() def __call__(self, x): batch, channels, height, width = x.shape proj_query = self.query_conv(x).reshape((batch, -1, height * width)) proj_key = self.key_conv(x).reshape((batch, -1, height * width)) proj_value = self.value_conv(x).reshape((batch, -1, height * width)) energy = F.batch_matmul(proj_query, proj_key, transa=True) w = F.softmax(energy, axis=-1) y = F.batch_matmul(proj_value, w, transb=True) y = y.reshape((batch, -1, height, width)) y = self.scale(y) + x return y class ChaAttBlock(Chain): """ Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. It explicitly models interdependencies between channels. """ def __init__(self): super(ChaAttBlock, self).__init__() with self.init_scope(): self.scale = ScaleBlock() def __call__(self, x): batch, channels, height, width = x.shape proj_query = x.reshape((batch, -1, height * width)) proj_key = x.reshape((batch, -1, height * width)) proj_value = x.reshape((batch, -1, height * width)) energy = F.batch_matmul(proj_query, proj_key, transb=True) energy_new = F.broadcast_to(F.max(energy, axis=-1, keepdims=True), shape=energy.shape) - energy w = F.softmax(energy_new, axis=-1) y = F.batch_matmul(w, proj_value) y = y.reshape((batch, -1, height, width)) y = self.scale(y) + x return y class DANetHeadBranch(Chain): """ DANet head branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. pose_att : bool, default True Whether to use position attention instead of channel one. 
""" def __init__(self, in_channels, out_channels, pose_att=True): super(DANetHeadBranch, self).__init__() mid_channels = in_channels // 4 dropout_rate = 0.1 with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) if pose_att: self.att = PosAttBlock(mid_channels) else: self.att = ChaAttBlock() self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): x = self.conv1(x) x = self.att(x) y = self.conv2(x) x = self.conv3(y) x = self.dropout(x) return x, y class DANetHead(Chain): """ DANet head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DANetHead, self).__init__() mid_channels = in_channels // 4 dropout_rate = 0.1 with self.init_scope(): self.branch_pa = DANetHeadBranch( in_channels=in_channels, out_channels=out_channels, pose_att=True) self.branch_ca = DANetHeadBranch( in_channels=in_channels, out_channels=out_channels, pose_att=False) self.conv = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): pa_x, pa_y = self.branch_pa(x) ca_x, ca_y = self.branch_ca(x) y = pa_y + ca_y x = self.conv(y) x = self.dropout(x) return x, pa_x, ca_x class DANet(Chain): """ DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), classes=19): super(DANet, self).__init__() assert (in_channels > 0) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.aux = aux self.fixed_size = fixed_size with self.init_scope(): self.backbone = backbone self.head = DANetHead( in_channels=backbone_out_channels, out_channels=classes) def __call__(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, _ = self.backbone(x) x, y, z = self.head(x) x = F.resize_images(x, output_shape=in_size) if self.aux: y = F.resize_images(y, output_shape=in_size) z = F.resize_images(z, output_shape=in_size) return x, y, z else: return x def get_danet(backbone, classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DANet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" net = DANet( backbone=backbone, classes=classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs): """ DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd50b_cityscapes", **kwargs) def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs): """ DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd101b_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (480, 480) aux = False pretrained = False models = [ danet_resnetd50b_cityscapes, danet_resnetd101b_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427) assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555) batch = 2 classes = 19 x = np.zeros((batch, 3, in_size[0], in_size[1]), np.float32) ys = net(x) y = ys[0] if aux else ys assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3])) if __name__ == "__main__": _test()
12,551
30.938931
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/mobilenetv2.py
""" MobileNetV2 for ImageNet-1K, implemented in Chainer. Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. """ __all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1', 'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import ReLU6, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, SimpleSequential class LinearBottleneck(Chain): """ So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. expansion : bool Whether do expansion of channels. remove_exp_conv : bool Whether to remove expansion convolution. """ def __init__(self, in_channels, out_channels, stride, expansion, remove_exp_conv): super(LinearBottleneck, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) mid_channels = in_channels * 6 if expansion else in_channels self.use_exp_conv = (expansion or (not remove_exp_conv)) with self.init_scope(): if self.use_exp_conv: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=ReLU6()) self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=ReLU6()) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.residual: x = x + identity return x class MobileNetV2(Chain): """ MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. remove_exp_conv : bool Whether to remove expansion convolution. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, remove_exp_conv, in_channels=3, in_size=(224, 224), classes=1000): super(MobileNetV2, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation=ReLU6())) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) setattr(stage, "unit{}".format(j + 1), LinearBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion, remove_exp_conv=remove_exp_conv)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, activation=ReLU6())) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", conv1x1( in_channels=in_channels, out_channels=classes, use_bias=False)) setattr(self.output, "final_flatten", 
partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mobilenetv2(width_scale, remove_exp_conv=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create MobileNetV2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. remove_exp_conv : bool, default False Whether to remove expansion convolution. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels = 32 final_block_channels = 1280 layers = [1, 2, 3, 4, 3, 3, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = int(final_block_channels * width_scale) net = MobileNetV2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, remove_exp_conv=remove_exp_conv, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mobilenetv2_w1(**kwargs): """ 1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=1.0, model_name="mobilenetv2_w1", **kwargs) def mobilenetv2_w3d4(**kwargs): """ 0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.75, model_name="mobilenetv2_w3d4", **kwargs) def mobilenetv2_wd2(**kwargs): """ 0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.5, model_name="mobilenetv2_wd2", **kwargs) def mobilenetv2_wd4(**kwargs): """ 0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.25, model_name="mobilenetv2_wd4", **kwargs) def mobilenetv2b_w1(**kwargs): """ 1.0 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mobilenetv2(width_scale=1.0, remove_exp_conv=True, model_name="mobilenetv2b_w1", **kwargs) def mobilenetv2b_w3d4(**kwargs): """ 0.75 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.75, remove_exp_conv=True, model_name="mobilenetv2b_w3d4", **kwargs) def mobilenetv2b_wd2(**kwargs): """ 0.5 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.5, remove_exp_conv=True, model_name="mobilenetv2b_wd2", **kwargs) def mobilenetv2b_wd4(**kwargs): """ 0.25 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mobilenetv2(width_scale=0.25, remove_exp_conv=True, model_name="mobilenetv2b_wd4", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mobilenetv2_w1, mobilenetv2_w3d4, mobilenetv2_wd2, mobilenetv2_wd4, mobilenetv2b_w1, mobilenetv2b_w3d4, mobilenetv2b_wd2, mobilenetv2b_wd4, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv2_w1 or weight_count == 3504960) assert (model != mobilenetv2_w3d4 or weight_count == 2627592) assert (model != mobilenetv2_wd2 or weight_count == 1964736) assert (model != mobilenetv2_wd4 or weight_count == 1516392) assert (model != mobilenetv2b_w1 or weight_count == 3503872) assert (model != mobilenetv2b_w3d4 or weight_count == 2626968) assert (model != mobilenetv2b_wd2 or weight_count == 1964448) assert (model != mobilenetv2b_wd4 or weight_count == 1516312) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,097
34.02139
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/squeezenet.py
""" SqueezeNet for ImageNet-1K, implemented in Chainer. Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. """ __all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential class FireConv(Chain): """ SqueezeNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. pad : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, ksize, pad): super(FireConv, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, pad=pad) def __call__(self, x): x = self.conv(x) x = F.relu(x) return x class FireUnit(Chain): """ SqueezeNet unit, so-called 'Fire' unit. Parameters: ---------- in_channels : int Number of input channels. squeeze_channels : int Number of output channels for squeeze convolution blocks. expand1x1_channels : int Number of output channels for expand 1x1 convolution blocks. expand3x3_channels : int Number of output channels for expand 3x3 convolution blocks. residual : bool Whether use residual connection. 
""" def __init__(self, in_channels, squeeze_channels, expand1x1_channels, expand3x3_channels, residual): super(FireUnit, self).__init__() self.residual = residual with self.init_scope(): self.squeeze = FireConv( in_channels=in_channels, out_channels=squeeze_channels, ksize=1, pad=0) self.expand1x1 = FireConv( in_channels=squeeze_channels, out_channels=expand1x1_channels, ksize=1, pad=0) self.expand3x3 = FireConv( in_channels=squeeze_channels, out_channels=expand3x3_channels, ksize=3, pad=1) def __call__(self, x): if self.residual: identity = x x = self.squeeze(x) y1 = self.expand1x1(x) y2 = self.expand3x3(x) out = F.concat((y1, y2), axis=1) if self.residual: out = out + identity return out class SqueezeInitBlock(Chain): """ SqueezeNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. """ def __init__(self, in_channels, out_channels, ksize): super(SqueezeInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=2) def __call__(self, x): x = self.conv(x) x = F.relu(x) return x class SqueezeNet(Chain): """ SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- channels : list of list of int Number of output channels for each unit. residuals : bool Whether to use residual units. init_block_kernel_size : int or tuple/list of 2 int The dimensions of the convolution window for the initial unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, residuals, init_block_kernel_size, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000): super(SqueezeNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", SqueezeInitBlock( in_channels=in_channels, out_channels=init_block_channels, ksize=init_block_kernel_size)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): setattr(stage, "pool{}".format(i + 1), partial( F.max_pooling_2d, ksize=3, stride=2)) for j, out_channels in enumerate(channels_per_stage): expand_channels = out_channels // 2 squeeze_channels = out_channels // 8 setattr(stage, "unit{}".format(j + 1), FireUnit( in_channels=in_channels, squeeze_channels=squeeze_channels, expand1x1_channels=expand_channels, expand3x3_channels=expand_channels, residual=((residuals is not None) and (residuals[i][j] == 1)))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "dropout", partial( F.dropout, ratio=0.5)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", L.Convolution2D( in_channels=in_channels, out_channels=classes, ksize=1)) setattr(self.output, "final_activ", F.relu) setattr(self.output, "final_pool", partial( F.average_pooling_2d, ksize=13, stride=1)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_squeezenet(version, residual=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SqueezeNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('1.0' or '1.1'). residual : bool, default False Whether to use residual connections. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == '1.0': channels = [[128, 128, 256], [256, 384, 384, 512], [512]] residuals = [[0, 1, 0], [1, 0, 1, 0], [1]] init_block_kernel_size = 7 init_block_channels = 96 elif version == '1.1': channels = [[128, 128], [256, 256], [384, 384, 512, 512]] residuals = [[0, 1], [0, 1], [0, 1, 0, 1]] init_block_kernel_size = 3 init_block_channels = 64 else: raise ValueError("Unsupported SqueezeNet version {}".format(version)) if not residual: residuals = None net = SqueezeNet( channels=channels, residuals=residuals, init_block_kernel_size=init_block_kernel_size, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def squeezenet_v1_0(**kwargs): """ SqueezeNet 'vanilla' model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=False, model_name="squeezenet_v1_0", **kwargs) def squeezenet_v1_1(**kwargs): """ SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.1", residual=False, model_name="squeezenet_v1_1", **kwargs) def squeezeresnet_v1_0(**kwargs): """ SqueezeNet model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=True, model_name="squeezeresnet_v1_0", **kwargs) def squeezeresnet_v1_1(**kwargs): """ SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.1", residual=True, model_name="squeezeresnet_v1_1", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ squeezenet_v1_0, squeezenet_v1_1, squeezeresnet_v1_0, squeezeresnet_v1_1, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != squeezenet_v1_0 or weight_count == 1248424) assert (model != squeezenet_v1_1 or weight_count == 1235496) assert (model != squeezeresnet_v1_0 or weight_count == 1248424) assert (model != squeezeresnet_v1_1 or weight_count == 1235496) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,298
31.885027
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/nin_cifar.py
""" NIN for CIFAR/SVHN, implemented in Chainer. Original paper: 'Network In Network,' https://arxiv.org/abs/1312.4400. """ __all__ = ['CIFARNIN', 'nin_cifar10', 'nin_cifar100', 'nin_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential class NINConv(Chain): """ NIN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 0 Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0): super(NINConv, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=False) self.activ = F.relu def __call__(self, x): x = self.conv(x) x = self.activ(x) return x class CIFARNIN(Chain): """ NIN model for CIFAR from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- channels : list of list of int Number of output channels for each unit. first_ksizes : list of int Convolution window sizes for the first units in each stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, first_ksizes, in_channels=3, in_size=(32, 32), classes=10): super(CIFARNIN, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): if i == 1: setattr(stage, "pool{}".format(i + 1), partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False)) else: setattr(stage, "pool{}".format(i + 1), partial( F.average_pooling_2d, ksize=3, stride=2, pad=1)) setattr(stage, "dropout{}".format(i + 1), partial( F.dropout, ratio=0.5)) kernel_size = first_ksizes[i] if j == 0 else 1 padding = (kernel_size - 1) // 2 setattr(stage, "unit{}".format(j + 1), NINConv( in_channels=in_channels, out_channels=out_channels, ksize=kernel_size, pad=padding)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", NINConv( in_channels=in_channels, out_channels=classes, ksize=1)) setattr(self.output, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_nin_cifar(classes, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create NIN model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [[192, 160, 96], [192, 192, 192], [192, 192]] first_ksizes = [5, 5, 3] net = CIFARNIN( channels=channels, first_ksizes=first_ksizes, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def nin_cifar10(classes=10, **kwargs): """ NIN model for CIFAR-10 from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_nin_cifar(classes=classes, model_name="nin_cifar10", **kwargs) def nin_cifar100(classes=100, **kwargs): """ NIN model for CIFAR-100 from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_nin_cifar(classes=classes, model_name="nin_cifar100", **kwargs) def nin_svhn(classes=10, **kwargs): """ NIN model for SVHN from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_nin_cifar(classes=classes, model_name="nin_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (nin_cifar10, 10), (nin_cifar100, 100), (nin_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != nin_cifar10 or weight_count == 966986) assert (model != nin_cifar100 or weight_count == 984356) assert (model != nin_svhn or weight_count == 966986) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
8,429
31.548263
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/vgg.py
""" VGG for ImageNet-1K, implemented in Chainer. Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. """ __all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b', 'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential class VGGDense(Chain): """ VGG specific dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(VGGDense, self).__init__() with self.init_scope(): self.fc = L.Linear( in_size=in_channels, out_size=out_channels) self.activ = F.relu self.dropout = partial( F.dropout, ratio=0.5) def __call__(self, x): x = self.fc(x) x = self.activ(x) x = self.dropout(x) return x class VGGOutputBlock(Chain): """ VGG specific output block. Parameters: ---------- in_channels : int Number of input channels. classes : int Number of classification classes. """ def __init__(self, in_channels, classes): super(VGGOutputBlock, self).__init__() mid_channels = 4096 with self.init_scope(): self.fc1 = VGGDense( in_channels=in_channels, out_channels=mid_channels) self.fc2 = VGGDense( in_channels=mid_channels, out_channels=mid_channels) self.fc3 = L.Linear( in_size=mid_channels, out_size=classes) def __call__(self, x): x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x class VGG(Chain): """ VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- channels : list of list of int Number of output channels for each unit. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, use_bias=True, use_bn=False, in_channels=3, in_size=(224, 224), classes=1000): super(VGG, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn)) in_channels = out_channels setattr(stage, "pool{}".format(i + 1), partial( F.max_pooling_2d, ksize=2, stride=2, pad=0)) setattr(self.features, "stage{}".format(i + 1), stage) in_channels = in_channels * 7 * 7 self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "classifier", VGGOutputBlock( in_channels=in_channels, classes=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_vgg(blocks, use_bias=True, use_bn=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create VGG model with specific parameters. Parameters: ---------- blocks : int Number of blocks. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 11: layers = [1, 1, 2, 2, 2] elif blocks == 13: layers = [2, 2, 2, 2, 2] elif blocks == 16: layers = [2, 2, 3, 3, 3] elif blocks == 19: layers = [2, 2, 4, 4, 4] else: raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks)) channels_per_layers = [64, 128, 256, 512, 512] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = VGG( channels=channels, use_bias=use_bias, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def vgg11(**kwargs): """ VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, model_name="vgg11", **kwargs) def vgg13(**kwargs): """ VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, model_name="vgg13", **kwargs) def vgg16(**kwargs): """ VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_vgg(blocks=16, model_name="vgg16", **kwargs) def vgg19(**kwargs): """ VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, model_name="vgg19", **kwargs) def bn_vgg11(**kwargs): """ VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs) def bn_vgg13(**kwargs): """ VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs) def bn_vgg16(**kwargs): """ VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs) def bn_vgg19(**kwargs): """ VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs) def bn_vgg11b(**kwargs): """ VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs) def bn_vgg13b(**kwargs): """ VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs) def bn_vgg16b(**kwargs): """ VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs) def bn_vgg19b(**kwargs): """ VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ vgg11, vgg13, vgg16, vgg19, bn_vgg11, bn_vgg13, bn_vgg16, bn_vgg19, bn_vgg11b, bn_vgg13b, bn_vgg16b, bn_vgg19b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != vgg11 or weight_count == 132863336) assert (model != vgg13 or weight_count == 133047848) assert (model != vgg16 or weight_count == 138357544) assert (model != vgg19 or weight_count == 143667240) assert (model != bn_vgg11 or weight_count == 132866088) assert (model != bn_vgg13 or weight_count == 133050792) assert (model != bn_vgg16 or weight_count == 138361768) assert (model != bn_vgg19 or weight_count == 143672744) assert (model != bn_vgg11b or weight_count == 132868840) assert (model != bn_vgg13b or weight_count == 133053736) assert (model != bn_vgg16b or weight_count == 138365992) assert (model != bn_vgg19b or weight_count == 143678248) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,926
30.724374
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnet_cub.py
""" ResNet for CUB-200-2011, implemented in Chainer. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub', 'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub', 'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub'] from .resnet import get_resnet def resnet10_cub(classes=200, **kwargs): """ ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs) def resnet12_cub(classes=200, **kwargs): """ ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs) def resnet14_cub(classes=200, **kwargs): """ ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs) def resnetbc14b_cub(classes=200, **kwargs): """ ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b_cub", **kwargs) def resnet16_cub(classes=200, **kwargs): """ ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs) def resnet18_cub(classes=200, **kwargs): """ ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs) def resnet26_cub(classes=200, **kwargs): """ ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. 
Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs) def resnetbc26b_cub(classes=200, **kwargs): """ ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b_cub", **kwargs) def resnet34_cub(classes=200, **kwargs): """ ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=34, model_name="resnet34_cub", **kwargs) def resnetbc38b_cub(classes=200, **kwargs): """ ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b_cub", **kwargs) def resnet50_cub(classes=200, **kwargs): """ ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=50, model_name="resnet50_cub", **kwargs) def resnet50b_cub(classes=200, **kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs) def resnet101_cub(classes=200, **kwargs): """ ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=101, model_name="resnet101_cub", **kwargs) def resnet101b_cub(classes=200, **kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs) def resnet152_cub(classes=200, **kwargs): """ ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=152, model_name="resnet152_cub", **kwargs) def resnet152b_cub(classes=200, **kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs) def resnet200_cub(classes=200, **kwargs): """ ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet(classes=classes, blocks=200, model_name="resnet200_cub", **kwargs) def resnet200b_cub(classes=200, **kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(classes=classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ resnet10_cub, resnet12_cub, resnet14_cub, resnetbc14b_cub, resnet16_cub, resnet18_cub, resnet26_cub, resnetbc26b_cub, resnet34_cub, resnetbc38b_cub, resnet50_cub, resnet50b_cub, resnet101_cub, resnet101b_cub, resnet152_cub, resnet152b_cub, resnet200_cub, resnet200b_cub, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10_cub or weight_count == 5008392) assert (model != resnet12_cub or weight_count == 5082376) assert (model != resnet14_cub or weight_count == 5377800) assert (model != resnetbc14b_cub or weight_count == 8425736) assert (model != resnet16_cub or weight_count == 6558472) assert (model != resnet18_cub or weight_count == 11279112) assert (model != resnet26_cub or weight_count == 17549832) assert (model != resnetbc26b_cub or weight_count == 14355976) assert (model != resnet34_cub or weight_count == 21387272) assert (model != resnetbc38b_cub or weight_count == 20286216) assert (model != resnet50_cub or weight_count == 23917832) assert (model != resnet50b_cub or weight_count == 23917832) assert (model != resnet101_cub or weight_count == 42909960) assert 
(model != resnet101b_cub or weight_count == 42909960) assert (model != resnet152_cub or weight_count == 58553608) assert (model != resnet152b_cub or weight_count == 58553608) assert (model != resnet200_cub or weight_count == 63034632) assert (model != resnet200b_cub or weight_count == 63034632) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 200)) if __name__ == "__main__": _test()
13,734
34.955497
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/bagnet.py
""" BagNet for ImageNet-1K, implemented in Chainer. Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. """ __all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock, SimpleSequential class BagNetBottleneck(Chain): """ BagNet bottleneck block for residual path in BagNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size of the second convolution. stride : int or tuple/list of 2 int Stride of the second convolution. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, ksize, stride, bottleneck_factor=4): super(BagNetBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = ConvBlock( in_channels=mid_channels, out_channels=mid_channels, ksize=ksize, stride=stride, pad=0) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class BagNetUnit(Chain): """ BagNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size of the second body convolution. stride : int or tuple/list of 2 int Stride of the second body convolution. 
""" def __init__(self, in_channels, out_channels, ksize, stride): super(BagNetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = BagNetBottleneck( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if x.shape[-1] != identity.shape[-1]: diff = identity.shape[-1] - x.shape[-1] identity = identity[:, :, :-diff, :-diff] x = x + identity x = self.activ(x) return x class BagNetInitBlock(Chain): """ BagNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(BagNetInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv1x1( in_channels=in_channels, out_channels=out_channels) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, pad=0) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class BagNet(Chain): """ BagNet model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_pool_size : int Size of the pooling windows for final pool. normal_kernel_sizes : list of int Count of the first units with 3x3 convolution window size for each stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. 
classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_pool_size, normal_kernel_sizes, in_channels=3, in_size=(224, 224), classes=1000): super(BagNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", BagNetInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != len(channels) - 1) else 1 ksize = 3 if j < normal_kernel_sizes[i] else 1 setattr(stage, "unit{}".format(j + 1), BagNetUnit( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=final_pool_size, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_bagnet(field, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create BagNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" layers = [3, 4, 6, 3] if field == 9: normal_kernel_sizes = [1, 1, 0, 0] final_pool_size = 27 elif field == 17: normal_kernel_sizes = [1, 1, 1, 0] final_pool_size = 26 elif field == 33: normal_kernel_sizes = [1, 1, 1, 1] final_pool_size = 24 else: raise ValueError("Unsupported BagNet with field: {}".format(field)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = BagNet( channels=channels, init_block_channels=init_block_channels, final_pool_size=final_pool_size, normal_kernel_sizes=normal_kernel_sizes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def bagnet9(**kwargs): """ BagNet-9 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_bagnet(field=9, model_name="bagnet9", **kwargs) def bagnet17(**kwargs): """ BagNet-17 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_bagnet(field=17, model_name="bagnet17", **kwargs) def bagnet33(**kwargs): """ BagNet-33 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_bagnet(field=33, model_name="bagnet33", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ bagnet9, bagnet17, bagnet33, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != bagnet9 or weight_count == 15688744) assert (model != bagnet17 or weight_count == 16213032) assert (model != bagnet33 or weight_count == 18310184) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,198
30.546479
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/airnet.py
""" AirNet for ImageNet-1K, implemented in Chainer. Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. """ __all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential, conv1x1_block, conv3x3_block class AirBlock(Chain): """ AirNet attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int, default 1 Number of groups. ratio: int, default 2 Air compression ratio. """ def __init__(self, in_channels, out_channels, groups=1, ratio=2): super(AirBlock, self).__init__() assert (out_channels % ratio == 0) mid_channels = out_channels // ratio with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, groups=groups) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): input_shape = x.shape x = self.conv1(x) x = self.pool(x) x = self.conv2(x) x = F.resize_images(x, output_shape=input_shape[2:]) x = self.conv3(x) x = F.sigmoid(x) return x class AirBottleneck(Chain): """ AirNet bottleneck block for residual path in AirNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. ratio: int Air compression ratio. 
""" def __init__(self, in_channels, out_channels, stride, ratio): super(AirBottleneck, self).__init__() mid_channels = out_channels // 4 self.use_air_block = (stride == 1 and mid_channels < 512) with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) if self.use_air_block: self.air = AirBlock( in_channels=in_channels, out_channels=mid_channels, ratio=ratio) def __call__(self, x): if self.use_air_block: att = self.air(x) x = self.conv1(x) x = self.conv2(x) if self.use_air_block: x = x * att x = self.conv3(x) return x class AirUnit(Chain): """ AirNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. ratio: int Air compression ratio. """ def __init__(self, in_channels, out_channels, stride, ratio): super(AirUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = AirBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, ratio=ratio) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class AirInitBlock(Chain): """ AirNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(AirInitBlock, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool(x) return x class AirNet(Chain): """ AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. ratio: int Air compression ratio. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, ratio, in_channels=3, in_size=(224, 224), classes=1000): super(AirNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", AirInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), AirUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, ratio=ratio)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_airnet(blocks, base_channels, ratio, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create AirNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. base_channels: int Base number of channels. ratio: int Air compression ratio. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks)) bottleneck_expansion = 4 init_block_channels = base_channels channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = AirNet( channels=channels, init_block_channels=init_block_channels, ratio=ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def airnet50_1x64d_r2(**kwargs): """ AirNet50-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_airnet(blocks=50, base_channels=64, ratio=2, model_name="airnet50_1x64d_r2", **kwargs) def airnet50_1x64d_r16(**kwargs): """ AirNet50-1x64d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_airnet(blocks=50, base_channels=64, ratio=16, model_name="airnet50_1x64d_r16", **kwargs) def airnet101_1x64d_r2(**kwargs): """ AirNet101-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_airnet(blocks=101, base_channels=64, ratio=2, model_name="airnet101_1x64d_r2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ airnet50_1x64d_r2, airnet50_1x64d_r16, airnet101_1x64d_r2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != airnet50_1x64d_r2 or weight_count == 27425864) assert (model != airnet50_1x64d_r16 or weight_count == 25714952) assert (model != airnet101_1x64d_r2 or weight_count == 51727432) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,994
30.01432
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/mnasnet.py
""" MnasNet for ImageNet-1K, implemented in Chainer. Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. """ __all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\ SimpleSequential class DwsExpSEResUnit(Chain): """ Depthwise separable expanded residual unit with SE-block. Here it used as MnasNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the second convolution layer. use_kernel3 : bool, default True Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int, default 1 Expansion factor for each unit. se_factor : int, default 0 SE reduction factor for each unit. use_skip : bool, default True Whether to use skip connection. activation : str, default 'relu' Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, stride=1, use_kernel3=True, exp_factor=1, se_factor=0, use_skip=True, activation="relu"): super(DwsExpSEResUnit, self).__init__() assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (stride == 1) and use_skip self.use_exp_conv = exp_factor > 1 self.use_se = se_factor > 0 mid_channels = exp_factor * in_channels dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block with self.init_scope(): if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) self.dw_conv = dwconv_block_fn( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), round_mid=False, mid_activation=activation) self.pw_conv = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) if self.residual: x = x + identity return x class MnasInitBlock(Chain): """ MnasNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. """ def __init__(self, in_channels, out_channels, mid_channels, use_skip): super(MnasInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = DwsExpSEResUnit( in_channels=mid_channels, out_channels=out_channels, use_skip=use_skip) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class MnasFinalBlock(Chain): """ MnasNet specific final block. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. """ def __init__(self, in_channels, out_channels, mid_channels, use_skip): super(MnasFinalBlock, self).__init__() with self.init_scope(): self.conv1 = DwsExpSEResUnit( in_channels=in_channels, out_channels=mid_channels, exp_factor=6, use_skip=use_skip) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class MnasNet(Chain): """ MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : list of 2 int Number of output channels for the initial unit. final_block_channels : list of 2 int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. se_factors : list of list of int SE reduction factor for each unit. init_block_use_skip : bool Whether to use skip connection in the initial unit. final_block_use_skip : bool Whether to use skip connection in the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, se_factors, init_block_use_skip, final_block_use_skip, in_channels=3, in_size=(224, 224), classes=1000): super(MnasNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", MnasInitBlock( in_channels=in_channels, out_channels=init_block_channels[1], mid_channels=init_block_channels[0], use_skip=init_block_use_skip)) in_channels = init_block_channels[1] for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] se_factor = se_factors[i][j] setattr(stage, "unit{}".format(j + 1), DwsExpSEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, se_factor=se_factor)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", MnasFinalBlock( in_channels=in_channels, out_channels=final_block_channels[1], mid_channels=final_block_channels[0], use_skip=final_block_use_skip)) in_channels = final_block_channels[1] setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mnasnet(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create MnasNet model with specific parameters. 
Parameters: ---------- version : str Version of MobileNetV3 ('b1', 'a1' or 'small'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "b1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]] kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]] exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]] se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]] init_block_use_skip = False final_block_use_skip = False elif version == "a1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]] init_block_use_skip = False final_block_use_skip = True elif version == "small": init_block_channels = [8, 8] final_block_channels = [144, 1280] channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]] exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]] init_block_use_skip = True final_block_use_skip = True else: raise ValueError("Unsupported MnasNet version {}".format(version)) if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale) net = MnasNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, 
kernels3=kernels3, exp_factors=exp_factors, se_factors=se_factors, init_block_use_skip=init_block_use_skip, final_block_use_skip=final_block_use_skip, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mnasnet_b1(**kwargs): """ MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs) def mnasnet_a1(**kwargs): """ MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs) def mnasnet_small(**kwargs): """ MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mnasnet_b1, mnasnet_a1, mnasnet_small, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mnasnet_b1 or weight_count == 4383312) assert (model != mnasnet_a1 or weight_count == 3887038) assert (model != mnasnet_small or weight_count == 2030264) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
14,700
33.91924
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/pyramidnet_cifar.py
""" PyramidNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. """ __all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn', 'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn', 'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn', 'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn', 'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn', 'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn', 'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential from .preresnet import PreResActivation from .pyramidnet import PyrUnit class CIFARPyramidNet(Chain): """ PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARPyramidNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, activation=None)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PyrUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'post_activ', PreResActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_pyramidnet_cifar(classes, blocks, alpha, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PyramidNet for CIFAR model with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. alpha : int PyramidNet's alpha value. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 growth_add = float(alpha) / float(sum(layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]], layers, [[init_block_channels]])[1:] channels = [[int(round(cij)) for cij in ci] for ci in channels] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARPyramidNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def pyramidnet110_a48_cifar10(classes=10, **kwargs): """ PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_cifar10", **kwargs) def pyramidnet110_a48_cifar100(classes=100, **kwargs): """ PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_cifar100", **kwargs) def pyramidnet110_a48_svhn(classes=10, **kwargs): """ PyramidNet-110 (a=48) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_svhn", **kwargs) def pyramidnet110_a84_cifar10(classes=10, **kwargs): """ PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_cifar10", **kwargs) def pyramidnet110_a84_cifar100(classes=100, **kwargs): """ PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_cifar100", **kwargs) def pyramidnet110_a84_svhn(classes=10, **kwargs): """ PyramidNet-110 (a=84) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_svhn", **kwargs) def pyramidnet110_a270_cifar10(classes=10, **kwargs): """ PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_cifar10", **kwargs) def pyramidnet110_a270_cifar100(classes=100, **kwargs): """ PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_cifar100", **kwargs) def pyramidnet110_a270_svhn(classes=10, **kwargs): """ PyramidNet-110 (a=270) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_svhn", **kwargs) def pyramidnet164_a270_bn_cifar10(classes=10, **kwargs): """ PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_cifar10", **kwargs) def pyramidnet164_a270_bn_cifar100(classes=100, **kwargs): """ PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_cifar100", **kwargs) def pyramidnet164_a270_bn_svhn(classes=10, **kwargs): """ PyramidNet-164 (a=270, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( classes=classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_svhn", **kwargs) def pyramidnet200_a240_bn_cifar10(classes=10, **kwargs): """ PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_cifar10", **kwargs) def pyramidnet200_a240_bn_cifar100(classes=100, **kwargs): """ PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_cifar100", **kwargs) def pyramidnet200_a240_bn_svhn(classes=10, **kwargs): """ PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( classes=classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_svhn", **kwargs) def pyramidnet236_a220_bn_cifar10(classes=10, **kwargs): """ PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_cifar10", **kwargs) def pyramidnet236_a220_bn_cifar100(classes=100, **kwargs): """ PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_cifar100", **kwargs) def pyramidnet236_a220_bn_svhn(classes=10, **kwargs): """ PyramidNet-236 (a=220, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( classes=classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_svhn", **kwargs) def pyramidnet272_a200_bn_cifar10(classes=10, **kwargs): """ PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_cifar10", **kwargs) def pyramidnet272_a200_bn_cifar100(classes=100, **kwargs): """ PyramidNet-272 (a=200, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_cifar100", **kwargs) def pyramidnet272_a200_bn_svhn(classes=10, **kwargs): """ PyramidNet-272 (a=200, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( classes=classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (pyramidnet110_a48_cifar10, 10), (pyramidnet110_a48_cifar100, 100), (pyramidnet110_a48_svhn, 10), (pyramidnet110_a84_cifar10, 10), (pyramidnet110_a84_cifar100, 100), (pyramidnet110_a84_svhn, 10), (pyramidnet110_a270_cifar10, 10), (pyramidnet110_a270_cifar100, 100), (pyramidnet110_a270_svhn, 10), (pyramidnet164_a270_bn_cifar10, 10), (pyramidnet164_a270_bn_cifar100, 100), (pyramidnet164_a270_bn_svhn, 10), (pyramidnet200_a240_bn_cifar10, 10), (pyramidnet200_a240_bn_cifar100, 100), (pyramidnet200_a240_bn_svhn, 10), (pyramidnet236_a220_bn_cifar10, 10), (pyramidnet236_a220_bn_cifar100, 100), (pyramidnet236_a220_bn_svhn, 10), (pyramidnet272_a200_bn_cifar10, 10), (pyramidnet272_a200_bn_cifar100, 100), (pyramidnet272_a200_bn_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained, classes=classes) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != pyramidnet110_a48_cifar10 or weight_count == 1772706) assert (model != pyramidnet110_a48_cifar100 or weight_count == 1778556) assert (model != pyramidnet110_a48_svhn or weight_count == 1772706) assert (model != pyramidnet110_a84_cifar10 or weight_count == 3904446) assert (model != pyramidnet110_a84_cifar100 or weight_count == 3913536) assert (model != pyramidnet110_a84_svhn or weight_count == 3904446) assert (model != pyramidnet110_a270_cifar10 or weight_count == 28485477) assert (model != pyramidnet110_a270_cifar100 or weight_count == 28511307) assert (model != pyramidnet110_a270_svhn or weight_count == 28485477) assert (model != pyramidnet164_a270_bn_cifar10 or weight_count == 27216021) assert (model != pyramidnet164_a270_bn_cifar100 or weight_count == 27319071) assert (model != pyramidnet164_a270_bn_svhn 
or weight_count == 27216021) assert (model != pyramidnet200_a240_bn_cifar10 or weight_count == 26752702) assert (model != pyramidnet200_a240_bn_cifar100 or weight_count == 26844952) assert (model != pyramidnet200_a240_bn_svhn or weight_count == 26752702) assert (model != pyramidnet236_a220_bn_cifar10 or weight_count == 26969046) assert (model != pyramidnet236_a220_bn_cifar100 or weight_count == 27054096) assert (model != pyramidnet236_a220_bn_svhn or weight_count == 26969046) assert (model != pyramidnet272_a200_bn_cifar10 or weight_count == 26210842) assert (model != pyramidnet272_a200_bn_cifar100 or weight_count == 26288692) assert (model != pyramidnet272_a200_bn_svhn or weight_count == 26210842) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
23,657
32.509915
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/preresnet_cifar.py
""" PreResNet for CIFAR/SVHN, implemented in Chainer. Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn', 'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn', 'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn', 'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn', 'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn', 'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn', 'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn', 'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, SimpleSequential from .preresnet import PreResUnit, PreResActivation class CIFARPreResNet(Chain): """ PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARPreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_preresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PreResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def preresnet20_cifar10(classes=10, **kwargs): """ PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar10", **kwargs) def preresnet20_cifar100(classes=100, **kwargs): """ PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar100", **kwargs) def preresnet20_svhn(classes=10, **kwargs): """ PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_svhn", **kwargs) def preresnet56_cifar10(classes=10, **kwargs): """ PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar10", **kwargs) def preresnet56_cifar100(classes=100, **kwargs): """ PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar100", **kwargs) def preresnet56_svhn(classes=10, **kwargs): """ PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_svhn", **kwargs) def preresnet110_cifar10(classes=10, **kwargs): """ PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar10", **kwargs) def preresnet110_cifar100(classes=100, **kwargs): """ PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar100", **kwargs) def preresnet110_svhn(classes=10, **kwargs): """ PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_svhn", **kwargs) def preresnet164bn_cifar10(classes=10, **kwargs): """ PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar10", **kwargs) def preresnet164bn_cifar100(classes=100, **kwargs): """ PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar100", **kwargs) def preresnet164bn_svhn(classes=10, **kwargs): """ PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_svhn", **kwargs) def preresnet272bn_cifar10(classes=10, **kwargs): """ PreResNet-272(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar10", **kwargs) def preresnet272bn_cifar100(classes=100, **kwargs): """ PreResNet-272(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar100", **kwargs) def preresnet272bn_svhn(classes=10, **kwargs): """ PreResNet-272(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_svhn", **kwargs) def preresnet542bn_cifar10(classes=10, **kwargs): """ PreResNet-542(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar10", **kwargs) def preresnet542bn_cifar100(classes=100, **kwargs): """ PreResNet-542(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar100", **kwargs) def preresnet542bn_svhn(classes=10, **kwargs): """ PreResNet-542(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_svhn", **kwargs) def preresnet1001_cifar10(classes=10, **kwargs): """ PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar10", **kwargs) def preresnet1001_cifar100(classes=100, **kwargs): """ PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. 
Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar100", **kwargs) def preresnet1001_svhn(classes=10, **kwargs): """ PreResNet-1001 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_svhn", **kwargs) def preresnet1202_cifar10(classes=10, **kwargs): """ PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar10", **kwargs) def preresnet1202_cifar100(classes=100, **kwargs): """ PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar100", **kwargs) def preresnet1202_svhn(classes=10, **kwargs): """ PreResNet-1202 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (preresnet20_cifar10, 10), (preresnet20_cifar100, 100), (preresnet20_svhn, 10), (preresnet56_cifar10, 10), (preresnet56_cifar100, 100), (preresnet56_svhn, 10), (preresnet110_cifar10, 10), (preresnet110_cifar100, 100), (preresnet110_svhn, 10), (preresnet164bn_cifar10, 10), (preresnet164bn_cifar100, 100), (preresnet164bn_svhn, 10), (preresnet272bn_cifar10, 10), (preresnet272bn_cifar100, 100), (preresnet272bn_svhn, 10), (preresnet542bn_cifar10, 10), (preresnet542bn_cifar100, 100), (preresnet542bn_svhn, 10), (preresnet1001_cifar10, 10), (preresnet1001_cifar100, 100), (preresnet1001_svhn, 10), (preresnet1202_cifar10, 10), (preresnet1202_cifar100, 100), (preresnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet20_cifar10 or weight_count == 272282) assert (model != preresnet20_cifar100 or weight_count == 278132) assert (model != preresnet20_svhn or weight_count == 272282) assert (model != preresnet56_cifar10 or weight_count == 855578) assert (model != preresnet56_cifar100 or weight_count == 861428) assert (model != preresnet56_svhn or weight_count == 855578) 
assert (model != preresnet110_cifar10 or weight_count == 1730522) assert (model != preresnet110_cifar100 or weight_count == 1736372) assert (model != preresnet110_svhn or weight_count == 1730522) assert (model != preresnet164bn_cifar10 or weight_count == 1703258) assert (model != preresnet164bn_cifar100 or weight_count == 1726388) assert (model != preresnet164bn_svhn or weight_count == 1703258) assert (model != preresnet272bn_cifar10 or weight_count == 2816090) assert (model != preresnet272bn_cifar100 or weight_count == 2839220) assert (model != preresnet272bn_svhn or weight_count == 2816090) assert (model != preresnet542bn_cifar10 or weight_count == 5598170) assert (model != preresnet542bn_cifar100 or weight_count == 5621300) assert (model != preresnet542bn_svhn or weight_count == 5598170) assert (model != preresnet1001_cifar10 or weight_count == 10327706) assert (model != preresnet1001_cifar100 or weight_count == 10350836) assert (model != preresnet1001_svhn or weight_count == 10327706) assert (model != preresnet1202_cifar10 or weight_count == 19423834) assert (model != preresnet1202_cifar100 or weight_count == 19429684) assert (model != preresnet1202_svhn or weight_count == 19423834) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
24,311
35.892261
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/alphapose_coco.py
""" AlphaPose for COCO Keypoint, implemented in Chainer. Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. """ __all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco'] import os from chainer import Chain from chainer.serializers import load_npz from .common import conv3x3, PixelShuffle, DucBlock, HeatmapMaxDetBlock, SimpleSequential from .fastseresnet import fastseresnet101b class AlphaPose(Chain): """ AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. 
""" def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17, **kwargs): super(AlphaPose, self).__init__(**kwargs) assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap with self.init_scope(): self.backbone = backbone self.decoder = SimpleSequential() with self.decoder.init_scope(): setattr(self.decoder, "init_block", PixelShuffle(scale_factor=2)) in_channels = backbone_out_channels // 4 for i, out_channels in enumerate(channels): setattr(self.decoder, "unit{}".format(i + 1), DucBlock( in_channels=in_channels, out_channels=out_channels, scale_factor=2)) in_channels = out_channels setattr(self.decoder, "final_block", conv3x3( in_channels=in_channels, out_channels=keypoints, use_bias=True)) self.heatmap_max_det = HeatmapMaxDetBlock() def __call__(self, x): x = self.backbone(x) # return self.decoder.el(0)(x) # return self.decoder.el(1)(self.decoder.el(0)(x)) # print(y[0, 0].array) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_alphapose(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create AlphaPose model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [256, 128] net = AlphaPose( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ AlphaPose model on the base of ResNet-101b for COCO Keypoint from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = fastseresnet101b(pretrained=pretrained_backbone).features del backbone.final_pool return get_alphapose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="alphapose_fastseresnet101b_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ alphapose_fastseresnet101b_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != alphapose_fastseresnet101b_coco or weight_count == 59569873) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
6,223
32.283422
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/pyramidnet.py
""" PyramidNet for ImageNet-1K, implemented in Chainer. Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. """ __all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import pre_conv1x1_block, pre_conv3x3_block, SimpleSequential from .preresnet import PreResActivation class PyrBlock(Chain): """ Simple PyramidNet block for residual path in PyramidNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PyrBlock, self).__init__() with self.init_scope(): self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False) self.conv2 = pre_conv3x3_block( in_channels=out_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class PyrBottleneck(Chain): """ PyramidNet bottleneck block for residual path in PyramidNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PyrBottleneck, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activate=False) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class PyrUnit(Chain): """ PyramidNet unit with residual connection. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. """ def __init__(self, in_channels, out_channels, stride, bottleneck): super(PyrUnit, self).__init__() assert (out_channels >= in_channels) self.resize_identity = (stride != 1) if out_channels > in_channels: self.identity_pad_width = ((0, 0), (0, out_channels - in_channels), (0, 0), (0, 0)) else: self.identity_pad_width = None with self.init_scope(): if bottleneck: self.body = PyrBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.body = PyrBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) if self.resize_identity: self.identity_pool = partial( F.average_pooling_2d, ksize=2, stride=stride) def __call__(self, x): identity = x x = self.body(x) x = self.bn(x) if self.resize_identity: identity = self.identity_pool(identity) if self.identity_pad_width is not None: identity = F.pad(identity, pad_width=self.identity_pad_width, mode="constant", constant_values=0) x = x + identity return x class PyrInitBlock(Chain): """ PyramidNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(PyrInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=2, pad=3, nobias=True) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) self.activ = F.relu self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class PyramidNet(Chain): """ PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), classes=1000): super(PyramidNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PyrInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PyrUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'post_activ', PreResActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_pyramidnet(blocks, alpha, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PyramidNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. alpha : int PyramidNet's alpha value. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14: layers = [2, 2, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 growth_add = float(alpha) / float(sum(layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]], layers, [[init_block_channels]])[1:] channels = [[int(round(cij)) for cij in ci] for ci in channels] if blocks < 50: bottleneck = False else: bottleneck = True channels = [[cij * 4 for cij in ci] for ci in channels] net = PyramidNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def pyramidnet101_a360(**kwargs): """ PyramidNet-101 model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_pyramidnet(blocks=101, alpha=360, model_name="pyramidnet101_a360", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ pyramidnet101_a360, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != pyramidnet101_a360 or weight_count == 42455070) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,789
29.544041
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/seresnet.py
""" SE-ResNet for ImageNet-1K, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26', 'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b', 'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, SEBlock, SimpleSequential from .resnet import ResBlock, ResBottleneck, ResInitBlock class SEResUnit(Chain): """ SE-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride): super(SEResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEResNet(Chain): """ SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(SEResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_seresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SE-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def seresnet10(**kwargs): """ SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=10, model_name="seresnet10", **kwargs) def seresnet12(**kwargs): """ SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=12, model_name="seresnet12", **kwargs) def seresnet14(**kwargs): """ SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=14, model_name="seresnet14", **kwargs) def seresnet16(**kwargs): """ SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=16, model_name="seresnet16", **kwargs) def seresnet18(**kwargs): """ SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=18, model_name="seresnet18", **kwargs) def seresnet26(**kwargs): """ SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs) def seresnetbc26b(**kwargs): """ SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs) def seresnet34(**kwargs): """ SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=34, model_name="seresnet34", **kwargs) def seresnetbc38b(**kwargs): """ SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs) def seresnet50(**kwargs): """ SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=50, model_name="seresnet50", **kwargs) def seresnet50b(**kwargs): """ SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs) def seresnet101(**kwargs): """ SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=101, model_name="seresnet101", **kwargs) def seresnet101b(**kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs) def seresnet152(**kwargs): """ SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, model_name="seresnet152", **kwargs) def seresnet152b(**kwargs): """ SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs) def seresnet200(**kwargs): """ SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(blocks=200, model_name="seresnet200", **kwargs) def seresnet200b(**kwargs): """ SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ seresnet10, seresnet12, seresnet14, seresnet16, seresnet18, seresnet26, seresnetbc26b, seresnet34, seresnetbc38b, seresnet50, seresnet50b, seresnet101, seresnet101b, seresnet152, seresnet152b, seresnet200, seresnet200b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet10 or weight_count == 5463332) assert (model != seresnet12 or weight_count == 5537896) assert (model != seresnet14 or weight_count == 5835504) assert (model != seresnet16 or weight_count == 7024640) assert (model != seresnet18 or weight_count == 11778592) assert (model != seresnet26 or weight_count == 18093852) assert (model != seresnetbc26b or weight_count == 17395976) assert (model != seresnet34 or weight_count == 21958868) assert (model != seresnetbc38b or weight_count == 24026616) assert (model != seresnet50 or weight_count == 28088024) assert (model != seresnet50b or weight_count == 28088024) assert (model != seresnet101 or weight_count == 49326872) assert (model != seresnet101b or weight_count == 49326872) assert (model != seresnet152 or weight_count == 66821848) assert (model != seresnet152b or weight_count == 66821848) assert (model != seresnet200 or weight_count == 71835864) assert (model != seresnet200b or weight_count == 71835864) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
18,495
32.446655
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/seresnet_cub.py
""" SE-ResNet for CUB-200-2011, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub', 'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub', 'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub', 'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub'] from .seresnet import get_seresnet def seresnet10_cub(classes=200, **kwargs): """ SE-ResNet-10 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=10, model_name="seresnet10_cub", **kwargs) def seresnet12_cub(classes=200, **kwargs): """ SE-ResNet-12 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=12, model_name="seresnet12_cub", **kwargs) def seresnet14_cub(classes=200, **kwargs): """ SE-ResNet-14 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=14, model_name="seresnet14_cub", **kwargs) def seresnetbc14b_cub(classes=200, **kwargs): """ SE-ResNet-BC-14b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="seresnetbc14b_cub", **kwargs) def seresnet16_cub(classes=200, **kwargs): """ SE-ResNet-16 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=16, model_name="seresnet16_cub", **kwargs) def seresnet18_cub(classes=200, **kwargs): """ SE-ResNet-18 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=18, model_name="seresnet18_cub", **kwargs) def seresnet26_cub(classes=200, **kwargs): """ SE-ResNet-26 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. 
Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=26, bottleneck=False, model_name="seresnet26_cub", **kwargs) def seresnetbc26b_cub(classes=200, **kwargs): """ SE-ResNet-BC-26b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b_cub", **kwargs) def seresnet34_cub(classes=200, **kwargs): """ SE-ResNet-34 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=34, model_name="seresnet34_cub", **kwargs) def seresnetbc38b_cub(classes=200, **kwargs): """ SE-ResNet-BC-38b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b_cub", **kwargs) def seresnet50_cub(classes=200, **kwargs): """ SE-ResNet-50 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=50, model_name="seresnet50_cub", **kwargs) def seresnet50b_cub(classes=200, **kwargs): """ SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=50, conv1_stride=False, model_name="seresnet50b_cub", **kwargs) def seresnet101_cub(classes=200, **kwargs): """ SE-ResNet-101 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=101, model_name="seresnet101_cub", **kwargs) def seresnet101b_cub(classes=200, **kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=101, conv1_stride=False, model_name="seresnet101b_cub", **kwargs) def seresnet152_cub(classes=200, **kwargs): """ SE-ResNet-152 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=152, model_name="seresnet152_cub", **kwargs) def seresnet152b_cub(classes=200, **kwargs): """ SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=152, conv1_stride=False, model_name="seresnet152b_cub", **kwargs) def seresnet200_cub(classes=200, **kwargs): """ SE-ResNet-200 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_seresnet(classes=classes, blocks=200, model_name="seresnet200_cub", **kwargs) def seresnet200b_cub(classes=200, **kwargs): """ SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=200, conv1_stride=False, model_name="seresnet200b_cub", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ seresnet10_cub, seresnet12_cub, seresnet14_cub, seresnetbc14b_cub, seresnet16_cub, seresnet18_cub, seresnet26_cub, seresnetbc26b_cub, seresnet34_cub, seresnetbc38b_cub, seresnet50_cub, seresnet50b_cub, seresnet101_cub, seresnet101b_cub, seresnet152_cub, seresnet152b_cub, seresnet200_cub, seresnet200b_cub, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet10_cub or weight_count == 5052932) assert (model != seresnet12_cub or weight_count == 5127496) assert (model != seresnet14_cub or weight_count == 5425104) assert (model != seresnetbc14b_cub or weight_count == 9126136) assert (model != seresnet16_cub or weight_count == 6614240) assert (model != seresnet18_cub or weight_count == 11368192) assert (model != seresnet26_cub or weight_count == 17683452) assert (model != seresnetbc26b_cub or weight_count == 15756776) assert (model != seresnet34_cub or weight_count == 21548468) assert (model != seresnetbc38b_cub or weight_count == 22387416) assert (model != seresnet50_cub or weight_count == 26448824) assert (model != seresnet50b_cub or weight_count == 26448824) assert 
(model != seresnet101_cub or weight_count == 47687672) assert (model != seresnet101b_cub or weight_count == 47687672) assert (model != seresnet152_cub or weight_count == 65182648) assert (model != seresnet152b_cub or weight_count == 65182648) assert (model != seresnet200_cub or weight_count == 70196664) assert (model != seresnet200b_cub or weight_count == 70196664) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 200)) if __name__ == "__main__": _test()
13,761
35.407407
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/densenet.py
""" DenseNet for ImageNet-1K, implemented in Chainer. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import pre_conv1x1_block, pre_conv3x3_block, SimpleSequential from .preresnet import PreResInitBlock, PreResActivation class DenseUnit(Chain): """ DenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(DenseUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bn_size with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=inc_channels) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): identity = x x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) x = F.concat((identity, x), axis=1) return x class TransitionBlock(Chain): """ DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the first unit of each stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(TransitionBlock, self).__init__() with self.init_scope(): self.conv = pre_conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.pool = partial( F.average_pooling_2d, ksize=2, stride=2, pad=0) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class DenseNet(Chain): """ DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, dropout_rate=0.0, in_channels=3, in_size=(224, 224), classes=1000): super(DenseNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), DenseUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation(in_channels=in_channels)) 
setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_densenet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DenseNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif blocks == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif blocks == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif blocks == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks)) from functools import reduce channels = reduce(lambda xi, yi: xi + [reduce(lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = DenseNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def densenet121(**kwargs): """ DenseNet-121 model from 'Densely Connected Convolutional Networks,' 
https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet(blocks=121, model_name="densenet121", **kwargs) def densenet161(**kwargs): """ DenseNet-161 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet(blocks=161, model_name="densenet161", **kwargs) def densenet169(**kwargs): """ DenseNet-169 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet(blocks=169, model_name="densenet169", **kwargs) def densenet201(**kwargs): """ DenseNet-201 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet(blocks=201, model_name="densenet201", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ densenet121, densenet161, densenet169, densenet201, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet121 or weight_count == 7978856) assert (model != densenet161 or weight_count == 28681000) assert (model != densenet169 or weight_count == 14149480) assert (model != densenet201 or weight_count == 20013928) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
10,417
31.354037
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/seresnext.py
""" SE-ResNeXt for ImageNet-1K, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, SimpleSequential, SEBlock from .resnet import ResInitBlock from .resnext import ResNeXtBottleneck class SEResNeXtUnit(Chain): """ SE-ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(SEResNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = ResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEResNeXt(Chain): """ SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): super(SEResNeXt, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SEResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_seresnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SE-ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def seresnext50_32x4d(**kwargs): """ SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d", **kwargs) def seresnext101_32x4d(**kwargs): """ SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d", **kwargs) def seresnext101_64x4d(**kwargs): """ SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_seresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ seresnext50_32x4d, seresnext101_32x4d, seresnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnext50_32x4d or weight_count == 27559896) assert (model != seresnext101_32x4d or weight_count == 48955416) assert (model != seresnext101_64x4d or weight_count == 88232984) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
8,957
31.456522
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/darts.py
""" DARTS for ImageNet-1K, implemented in Chainer. Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. """ __all__ = ['DARTS', 'darts'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, SimpleSequential from .nasnet import nasnet_dual_path_sequential class DwsConv(Chain): """ Standard dilated depthwise separable convolution block with. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int Dilation value for convolution layer. use_bias : bool, default False Whether the layers use a bias vector. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate, use_bias=False): super(DwsConv, self).__init__() with self.init_scope(): self.dw_conv = L.Convolution2D( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=in_channels) self.pw_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class DartsConv(Chain): """ DARTS specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. activate : bool, default True Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, activate=True): super(DartsConv, self).__init__() self.activate = activate with self.init_scope(): if self.activate: self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) def forward(self, x): if self.activate: x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def darts_conv1x1(in_channels, out_channels, activate=True): """ 1x1 version of the DARTS specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool, default True Whether activate the convolution block. """ return DartsConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=1, pad=0, activate=activate) def darts_conv3x3_s2(in_channels, out_channels, activate=True): """ 3x3 version of the DARTS specific convolution block with stride 2. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool, default True Whether activate the convolution block. """ return DartsConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=2, pad=1, activate=activate) class DartsDwsConv(Chain): """ DARTS specific dilated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int Dilation value for convolution layer. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate): super(DartsDwsConv, self).__init__() with self.init_scope(): self.activ = F.relu self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, use_bias=False) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) def forward(self, x): x = self.activ(x) x = self.conv(x) x = self.bn(x) return x class DartsDwsBranch(Chain): """ DARTS specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, ksize, stride, pad): super(DartsDwsBranch, self).__init__() mid_channels = in_channels with self.init_scope(): self.conv1 = DartsDwsConv( in_channels=in_channels, out_channels=mid_channels, ksize=ksize, stride=stride, pad=pad, dilate=1) self.conv2 = DartsDwsConv( in_channels=mid_channels, out_channels=out_channels, ksize=ksize, stride=1, pad=pad, dilate=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class DartsReduceBranch(Chain): """ DARTS specific factorized reduce block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Stride of the convolution. 
""" def __init__(self, in_channels, out_channels, stride=2): super(DartsReduceBranch, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 with self.init_scope(): self.activ = F.relu self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.conv2 = conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) def forward(self, x): x = self.activ(x) x1 = self.conv1(x) x = x[:, :, 1:, 1:] x2 = self.conv2(x) x = F.concat((x1, x2), axis=1) x = self.bn(x) return x class Stem1Unit(Chain): """ DARTS Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Stem1Unit, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = darts_conv3x3_s2( in_channels=in_channels, out_channels=mid_channels, activate=False) self.conv2 = darts_conv3x3_s2( in_channels=mid_channels, out_channels=out_channels, activate=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def stem2_unit(in_channels, out_channels): """ DARTS Stem2 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return darts_conv3x3_s2( in_channels=in_channels, out_channels=out_channels, activate=True) def darts_maxpool3x3(channels, stride): """ DARTS specific 3x3 Max pooling layer. Parameters: ---------- channels : int Number of input/output channels. Unused parameter. stride : int or tuple/list of 2 int Stride of the convolution. """ assert (channels > 0) return partial( F.max_pooling_2d, ksize=3, stride=stride, pad=1, cover_all=False) def darts_skip_connection(channels, stride): """ DARTS specific skip connection layer. Parameters: ---------- channels : int Number of input/output channels. 
stride : int or tuple/list of 2 int Stride of the convolution. """ assert (channels > 0) if stride == 1: return F.identity else: assert (stride == 2) return DartsReduceBranch( in_channels=channels, out_channels=channels, stride=stride) def darts_dws_conv3x3(channels, stride): """ 3x3 version of DARTS specific dilated convolution block. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ return DartsDwsConv( in_channels=channels, out_channels=channels, ksize=3, stride=stride, pad=2, dilate=2) def darts_dws_branch3x3(channels, stride): """ 3x3 version of DARTS specific dilated convolution branch. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ return DartsDwsBranch( in_channels=channels, out_channels=channels, ksize=3, stride=stride, pad=1) # Set of operations in genotype. GENOTYPE_OPS = { 'max_pool_3x3': darts_maxpool3x3, 'skip_connect': darts_skip_connection, 'dil_conv_3x3': darts_dws_conv3x3, 'sep_conv_3x3': darts_dws_branch3x3, } class DartsMainBlock(Chain): """ DARTS main block, described by genotype. Parameters: ---------- genotype : list of tuples (str, int) List of genotype elements (operations and linked indices). channels : int Number of input/output channels. reduction : bool Whether use reduction. 
""" def __init__(self, genotype, channels, reduction): super(DartsMainBlock, self).__init__() self.concat = [2, 3, 4, 5] op_names, indices = zip(*genotype) self.indices = indices self.steps = len(op_names) // 2 with self.init_scope(): for i, (name, index) in enumerate(zip(op_names, indices)): stride = 2 if reduction and index < 2 else 1 setattr(self, "ops{}".format(i + 1), GENOTYPE_OPS[name](channels, stride)) def forward(self, x, x_prev): s0 = x_prev s1 = x states = [s0, s1] for i in range(self.steps): j1 = 2 * i j2 = 2 * i + 1 op1 = getattr(self, "ops{}".format(j1 + 1)) op2 = getattr(self, "ops{}".format(j2 + 1)) y1 = states[self.indices[j1]] y2 = states[self.indices[j2]] y1 = op1(y1) y2 = op2(y2) s = y1 + y2 states += [s] x_out = F.concat([states[i] for i in self.concat], axis=1) return x_out class DartsUnit(Chain): """ DARTS unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. genotype : list of tuples (str, int) List of genotype elements (operations and linked indices). reduction : bool Whether use reduction. prev_reduction : bool Whether use previous reduction. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, genotype, reduction, prev_reduction): super(DartsUnit, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): if prev_reduction: self.preprocess_prev = DartsReduceBranch( in_channels=prev_in_channels, out_channels=mid_channels) else: self.preprocess_prev = darts_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.preprocess = darts_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.body = DartsMainBlock( genotype=genotype, channels=mid_channels, reduction=reduction) def forward(self, x, x_prev): x = self.preprocess(x) x_prev = self.preprocess_prev(x_prev) x_out = self.body(x, x_prev) return x_out class DARTS(Chain): """ DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. Parameters: ---------- channels : list of list of int Number of output channels for each unit. stem_blocks_channels : int Number of output channels for the Stem units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, stem_blocks_channels, normal_genotype, reduce_genotype, in_channels=3, in_size=(224, 224), classes=1000): super(DARTS, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=2, last_ordinals=1) with self.features.init_scope(): setattr(self.features, "stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=stem_blocks_channels)) in_channels = stem_blocks_channels setattr(self.features, "stem2_unit", stem2_unit( in_channels=in_channels, out_channels=stem_blocks_channels)) prev_in_channels = in_channels in_channels = stem_blocks_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): reduction = (i != 0) and (j == 0) prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)) genotype = reduce_genotype if reduction else normal_genotype setattr(stage, "unit{}".format(j + 1), DartsUnit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, genotype=genotype, reduction=reduction, prev_reduction=prev_reduction)) prev_in_channels = in_channels in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def forward(self, x): x = self.features(x) x = self.output(x) return x def get_darts(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DARTS model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ stem_blocks_channels = 48 layers = [4, 5, 5] channels_per_layers = [192, 384, 768] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] normal_genotype = [ ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)] reduce_genotype = [ ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)] net = DARTS( channels=channels, stem_blocks_channels=stem_blocks_channels, normal_genotype=normal_genotype, reduce_genotype=reduce_genotype, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def darts(**kwargs): """ DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_darts(model_name="darts", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ darts, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != darts or weight_count == 4718752) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
21,007
27.504749
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/drn.py
""" DRN for ImageNet-1K, implemented in Chainer. Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. """ __all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential class DRNConv(Chain): """ DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int Dilation value for convolution layer. activate : bool Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate, activate): super(DRNConv, self).__init__() self.activate = activate with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, dilate=dilate) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) if self.activate: self.activ = F.relu def __call__(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def drn_conv1x1(in_channels, out_channels, stride, activate): """ 1x1 version of the DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. activate : bool Whether activate the convolution block. 
""" return DRNConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, dilate=1, activate=activate) def drn_conv3x3(in_channels, out_channels, stride, dilate, activate): """ 3x3 version of the DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. dilate : int or tuple/list of 2 int Padding/dilation value for convolution layer. activate : bool Whether activate the convolution block. """ return DRNConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=dilate, dilate=dilate, activate=activate) class DRNBlock(Chain): """ Simple DRN block for residual path in DRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. dilate : int or tuple/list of 2 int Padding/dilation value for convolution layers. """ def __init__(self, in_channels, out_channels, stride, dilate): super(DRNBlock, self).__init__() with self.init_scope(): self.conv1 = drn_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilate, activate=True) self.conv2 = drn_conv3x3( in_channels=out_channels, out_channels=out_channels, stride=1, dilate=dilate, activate=False) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class DRNBottleneck(Chain): """ DRN bottleneck block for residual path in DRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. dilate : int or tuple/list of 2 int Padding/dilation value for 3x3 convolution layer. 
""" def __init__(self, in_channels, out_channels, stride, dilate): super(DRNBottleneck, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1 = drn_conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=1, activate=True) self.conv2 = drn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, dilate=dilate, activate=True) self.conv3 = drn_conv1x1( in_channels=mid_channels, out_channels=out_channels, stride=1, activate=False) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class DRNUnit(Chain): """ DRN unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. dilate : int or tuple/list of 2 int Padding/dilation value for 3x3 convolution layers. bottleneck : bool Whether to use a bottleneck or simple block in units. simplified : bool Whether to use a simple or simplified block in units. residual : bool Whether do residual calculations. 
""" def __init__(self, in_channels, out_channels, stride, dilate, bottleneck, simplified, residual): super(DRNUnit, self).__init__() assert residual or (not bottleneck) assert (not (bottleneck and simplified)) assert (not (residual and simplified)) self.residual = residual self.resize_identity = ((in_channels != out_channels) or (stride != 1)) and self.residual and (not simplified) with self.init_scope(): if bottleneck: self.body = DRNBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilate) elif simplified: self.body = drn_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilate, activate=False) else: self.body = DRNBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilate) if self.resize_identity: self.identity_conv = drn_conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if self.residual: x = x + identity x = self.activ(x) return x def drn_init_block(in_channels, out_channels): """ DRN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return DRNConv( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=1, pad=3, dilate=1, activate=True) class DRN(Chain): """ DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dilations : list of list of int Dilation values for 3x3 convolution layers for each unit. bottlenecks : list of list of int Whether to use a bottleneck or simple block in each unit. simplifieds : list of list of int Whether to use a simple or simplified block in each unit. 
residuals : list of list of int Whether to use residual block in each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, dilations, bottlenecks, simplifieds, residuals, in_channels=3, in_size=(224, 224), classes=1000): super(DRN, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", drn_init_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DRNUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilations[i][j], bottleneck=(bottlenecks[i][j] == 1), simplified=(simplifieds[i][j] == 1), residual=(residuals[i][j] == 1))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=28, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", L.Convolution2D( in_channels=in_channels, out_channels=classes, ksize=1)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_drn(blocks, simplified=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DRN-C or DRN-D model with specific parameters. Parameters: ---------- blocks : int Number of blocks. 
simplified : bool, default False Whether to use simplified scheme (D architecture). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 22: assert simplified layers = [1, 1, 2, 2, 2, 2, 1, 1] elif blocks == 26: layers = [1, 1, 2, 2, 2, 2, 1, 1] elif blocks == 38: assert simplified layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 42: layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 54: assert simplified layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 58: layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 105: assert simplified layers = [1, 1, 3, 4, 23, 3, 1, 1] else: raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks)) if blocks < 50: channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512] bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0] else: channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512] bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0] if simplified: simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1] residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0] else: simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0] residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0] dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1] downsample = [0, 1, 1, 1, 0, 0, 0, 0] def expand(property_per_layers): from functools import reduce return reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(property_per_layers, layers, downsample), [[]]) channels = expand(channels_per_layers) dilations = expand(dilations_per_layers) bottlenecks = expand(bottlenecks_per_layers) residuals = expand(residuals_per_layers) simplifieds = expand(simplifieds_per_layers) init_block_channels = channels_per_layers[0] net = DRN( channels=channels, init_block_channels=init_block_channels, dilations=dilations, bottlenecks=bottlenecks, 
simplifieds=simplifieds, residuals=residuals, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def drnc26(**kwargs): """ DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=26, model_name="drnc26", **kwargs) def drnc42(**kwargs): """ DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=42, model_name="drnc42", **kwargs) def drnc58(**kwargs): """ DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=58, model_name="drnc58", **kwargs) def drnd22(**kwargs): """ DRN-D-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs) def drnd38(**kwargs): """ DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=38, simplified=True, model_name="drnd38", **kwargs) def drnd54(**kwargs): """ DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=54, simplified=True, model_name="drnd54", **kwargs) def drnd105(**kwargs): """ DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_drn(blocks=105, simplified=True, model_name="drnd105", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ drnc26, drnc42, drnc58, drnd22, drnd38, drnd54, drnd105, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != drnc26 or weight_count == 21126584) assert (model != drnc42 or weight_count == 31234744) assert (model != drnc58 or weight_count == 40542008) # 41591608 assert (model != drnd22 or weight_count == 16393752) assert (model != drnd38 or weight_count == 26501912) assert (model != drnd54 or weight_count == 35809176) assert (model != drnd105 or weight_count == 54801304) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
19,329
29.488959
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/mixnet.py
""" MixNet for ImageNet-1K, implemented in Chainer. Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. """ __all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, get_activation_layer, conv1x1_block, conv3x3_block, dwconv3x3_block, SEBlock,\ SimpleSequential class MixConv(Chain): """ Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of int, or tuple/list of tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of int, or tuple/list of tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. axis : int, default 1 The axis on which to concatenate the outputs. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, axis=1): super(MixConv, self).__init__() self.layer_names = [] ksize = ksize if isinstance(ksize, list) else [ksize] pad = pad if isinstance(pad, list) else [pad] kernel_count = len(ksize) splitted_in_channels = self.split_channels(in_channels, kernel_count) splitted_out_channels = self.split_channels(out_channels, kernel_count) self.axis = axis import numpy as np self.in_channel_inds = list(np.add.accumulate(splitted_in_channels))[:-1] with self.init_scope(): for i, ksize_i in enumerate(ksize): in_channels_i = splitted_in_channels[i] out_channels_i = splitted_out_channels[i] pad_i = pad[i] setattr(self, "conv{}".format(i + 1), L.Convolution2D( in_channels=in_channels_i, out_channels=out_channels_i, ksize=ksize_i, stride=stride, pad=pad_i, nobias=(not use_bias), dilate=dilate, groups=(out_channels_i if out_channels == groups else groups))) def __setattr__(self, name, value): super(MixConv, self).__setattr__(name, value) if self.within_init_scope and callable(value): self.layer_names.append(name) def __delattr__(self, name): super(MixConv, self).__delattr__(name) try: self.layer_names.remove(name) except ValueError: pass def __len__(self): return len(self.layer_names) def __call__(self, x): xx = F.split_axis(x, self.in_channel_inds, axis=self.axis) out = [self[name_i](x_i) for x_i, name_i in zip(xx, self.layer_names)] x = F.concat(tuple(out), axis=self.axis) return x @staticmethod def split_channels(channels, kernel_count): splitted_channels = [channels // kernel_count] * kernel_count splitted_channels[0] += channels - sum(splitted_channels) return splitted_channels class MixConvBlock(Chain): """ Mixed convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
ksize : int or tuple/list of int, or tuple/list of tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of int, or tuple/list of tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.activate Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): super(MixConvBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn with self.init_scope(): self.conv = MixConv( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def __call__(self, x): x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def mixconv1x1_block(in_channels, out_channels, kernel_count, stride=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 1x1 version of the mixed convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_count : int Kernel count. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. 
bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str, or None, default F.activate Activation function or name of activation function. """ return MixConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=([1] * kernel_count), stride=stride, pad=([0] * kernel_count), groups=groups, use_bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class MixUnit(Chain): """ MixNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. exp_kernel_count : int Expansion convolution kernel count for each unit. conv1_kernel_count : int Conv1 kernel count for each unit. conv2_kernel_count : int Conv2 kernel count for each unit. exp_factor : int Expansion factor for each unit. se_factor : int SE reduction factor for each unit. activation : str Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, stride, exp_kernel_count, conv1_kernel_count, conv2_kernel_count, exp_factor, se_factor, activation): super(MixUnit, self).__init__() assert (exp_factor >= 1) assert (se_factor >= 0) self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = se_factor > 0 mid_channels = exp_factor * in_channels self.use_exp_conv = exp_factor > 1 with self.init_scope(): if self.use_exp_conv: if exp_kernel_count == 1: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) else: self.exp_conv = mixconv1x1_block( in_channels=in_channels, out_channels=mid_channels, kernel_count=exp_kernel_count, activation=activation) if conv1_kernel_count == 1: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv1 = MixConvBlock( in_channels=mid_channels, out_channels=mid_channels, ksize=[3 + 2 * i for i in range(conv1_kernel_count)], stride=stride, pad=[1 + i for i in range(conv1_kernel_count)], groups=mid_channels, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), round_mid=False, mid_activation=activation) if conv2_kernel_count == 1: self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) else: self.conv2 = mixconv1x1_block( in_channels=mid_channels, out_channels=out_channels, kernel_count=conv2_kernel_count, activation=None) def __call__(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class MixInitBlock(Chain): """ MixNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(MixInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.conv2 = MixUnit( in_channels=out_channels, out_channels=out_channels, stride=1, exp_kernel_count=1, conv1_kernel_count=1, conv2_kernel_count=1, exp_factor=1, se_factor=0, activation="relu") def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class MixNet(Chain): """ MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. exp_kernel_counts : list of list of int Expansion convolution kernel count for each unit. conv1_kernel_counts : list of list of int Conv1 kernel count for each unit. conv2_kernel_counts : list of list of int Conv2 kernel count for each unit. exp_factors : list of list of int Expansion factor for each unit. se_factors : list of list of int SE reduction factor for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, exp_kernel_counts, conv1_kernel_counts, conv2_kernel_counts, exp_factors, se_factors, in_channels=3, in_size=(224, 224), classes=1000): super(MixNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", MixInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if ((j == 0) and (i != 3)) or\ ((j == len(channels_per_stage) // 2) and (i == 3)) else 1 exp_kernel_count = exp_kernel_counts[i][j] conv1_kernel_count = conv1_kernel_counts[i][j] conv2_kernel_count = conv2_kernel_counts[i][j] exp_factor = exp_factors[i][j] se_factor = se_factors[i][j] activation = "relu" if i == 0 else "swish" setattr(stage, "unit{}".format(j + 1), MixUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, exp_kernel_count=exp_kernel_count, conv1_kernel_count=conv1_kernel_count, conv2_kernel_count=conv2_kernel_count, exp_factor=exp_factor, se_factor=se_factor, activation=activation)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mixnet(version, width_scale, model_name=None, pretrained=False, 
root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create MixNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('s' or 'm'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "s": init_block_channels = 16 channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]] exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]] conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]] conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]] exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]] se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]] elif version == "m": init_block_channels = 24 channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]] exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]] conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]] conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]] exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]] se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]] else: raise ValueError("Unsupported MixNet version {}".format(version)) final_block_channels = 1536 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale) net = MixNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, exp_kernel_counts=exp_kernel_counts, conv1_kernel_counts=conv1_kernel_counts, 
conv2_kernel_counts=conv2_kernel_counts, exp_factors=exp_factors, se_factors=se_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mixnet_s(**kwargs): """ MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mixnet(version="s", width_scale=1.0, model_name="mixnet_s", **kwargs) def mixnet_m(**kwargs): """ MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mixnet(version="m", width_scale=1.0, model_name="mixnet_m", **kwargs) def mixnet_l(**kwargs): """ MixNet-L model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mixnet(version="m", width_scale=1.3, model_name="mixnet_l", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mixnet_s, mixnet_m, mixnet_l, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mixnet_s or weight_count == 4134606) assert (model != mixnet_m or weight_count == 5014382) assert (model != mixnet_l or weight_count == 7329252) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
21,565
34.354098
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/dabnet.py
""" DABNet for image segmentation, implemented in Chainer. Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. """ __all__ = ['DABNet', 'dabnet_cityscapes'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\ DualPathSequential, SimpleSequential class DwaConvBlock(Chain): """ Depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. ksize : int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. 
""" def __init__(self, channels, ksize, stride, pad, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu), **kwargs): super(DwaConvBlock, self).__init__(**kwargs) with self.init_scope(): self.conv1 = ConvBlock( in_channels=channels, out_channels=channels, ksize=(ksize, 1), stride=stride, pad=(pad, 0), dilate=(dilate, 1), groups=channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) self.conv2 = ConvBlock( in_channels=channels, out_channels=channels, ksize=(1, ksize), stride=stride, pad=(0, pad), dilate=(1, dilate), groups=channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x def dwa_conv3x3_block(channels, stride=1, pad=1, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu), **kwargs): """ 3x3 version of the depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. stride : int, default 1 Stride of the convolution. pad : int, default 1 Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return DwaConvBlock( channels=channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, **kwargs) class DABBlock(Chain): """ DABNet specific base block. Parameters: ---------- channels : int Number of input/output channels. dilate : int Dilation value for a dilated branch in the unit. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, channels, dilate, bn_eps, **kwargs): super(DABBlock, self).__init__(**kwargs) mid_channels = channels // 2 with self.init_scope(): self.norm_activ1 = NormActivation( in_channels=channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(channels))) self.conv1 = conv3x3_block( in_channels=channels, out_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid_channels))) self.branches = Concurrent(stack=True) with self.branches.init_scope(): setattr(self.branches, "branches1", dwa_conv3x3_block( channels=mid_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid_channels)))) setattr(self.branches, "branches2", dwa_conv3x3_block( channels=mid_channels, pad=dilate, dilate=dilate, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid_channels)))) self.norm_activ2 = NormActivation( in_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid_channels))) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=channels) def __call__(self, x): identity = x x = self.norm_activ1(x) x = self.conv1(x) x = self.branches(x) x = F.sum(x, axis=1) x = self.norm_activ2(x) x = self.conv2(x) x = x + identity return x class DownBlock(Chain): """ DABNet specific downsample block for the main branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, out_channels, bn_eps, **kwargs): super(DownBlock, self).__init__(**kwargs) self.expand = (in_channels < out_channels) mid_channels = out_channels - in_channels if self.expand else out_channels with self.init_scope(): self.conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=2) if self.expand: self.pool = partial( F.max_pooling_2d, ksize=2, stride=2, cover_all=False) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) def __call__(self, x): y = self.conv(x) if self.expand: z = self.pool(x) y = F.concat((y, z), axis=1) y = self.norm_activ(y) return y class DABUnit(Chain): """ DABNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilates : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, dilates, bn_eps, **kwargs): super(DABUnit, self).__init__(**kwargs) mid_channels = out_channels // 2 with self.init_scope(): self.down = DownBlock( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps) self.blocks = SimpleSequential() with self.blocks.init_scope(): for i, dilate in enumerate(dilates): setattr(self.blocks, "block{}".format(i + 1), DABBlock( channels=mid_channels, dilate=dilate, bn_eps=bn_eps)) def __call__(self, x): x = self.down(x) y = self.blocks(x) x = F.concat((y, x), axis=1) return x class DABStage(Chain): """ DABNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. dilates : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, dilates, bn_eps, **kwargs): super(DABStage, self).__init__(**kwargs) self.use_unit = (len(dilates) > 0) with self.init_scope(): self.x_down = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) if self.use_unit: self.unit = DABUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), dilates=dilates, bn_eps=bn_eps) self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(y_out_channels))) def __call__(self, y, x): x = self.x_down(x) if self.use_unit: y = self.unit(y) y = F.concat((y, x), axis=1) y = self.norm_activ(y) return y, x class DABInitBlock(Chain): """ DABNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps, **kwargs): super(DABInitBlock, self).__init__(**kwargs) with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class DABNet(Chain): """ DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. dilates : list of list of int Dilations for blocks. 
bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. """ def __init__(self, channels, init_block_channels, dilates, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, **kwargs): super(DABNet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size with self.init_scope(): self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) with self.features.init_scope(): setattr(self.features, "init_block", DABInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) y_in_channels = init_block_channels for i, (y_out_channels, dilates_i) in enumerate(zip(channels, dilates)): setattr(self.features, "stage{}".format(i + 1), DABStage( x_channels=in_channels, y_in_channels=y_in_channels, y_out_channels=y_out_channels, dilates=dilates_i, bn_eps=bn_eps)) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=classes) self.up = InterpolationBlock( scale_factor=8, align_corners=False) def __call__(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] y = self.features(x, x) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_dabnet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DABNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels = 32 channels = [35, 131, 259] dilates = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]] bn_eps = 1e-3 net = DABNet( channels=channels, init_block_channels=init_block_channels, dilates=dilates, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def dabnet_cityscapes(classes=19, **kwargs): """ DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dabnet(classes=classes, model_name="dabnet_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ dabnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != dabnet_cityscapes or weight_count == 756643) batch = 4 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert (y.shape == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
17,130
29.428064
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/cgnet.py
""" CGNet for image segmentation, implemented in Chainer. Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. """ __all__ = ['CGNet', 'cgnet_cityscapes'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\ DualPathSequential, SimpleSequential, InterpolationBlock class CGBlock(Chain): """ CGNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilate : int Dilation value. se_reduction : int SE-block reduction value. down : bool Whether to downsample. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, dilate, se_reduction, down, bn_eps, **kwargs): super(CGBlock, self).__init__(**kwargs) self.down = down if self.down: mid1_channels = out_channels mid2_channels = 2 * out_channels else: mid1_channels = out_channels // 2 mid2_channels = out_channels with self.init_scope(): if self.down: self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) else: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid1_channels))) self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branches1", depthwise_conv3x3(channels=mid1_channels)) setattr(self.branches, "branches2", depthwise_conv3x3( channels=mid1_channels, pad=dilate, dilate=dilate)) self.norm_activ = NormActivation( in_channels=mid2_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(mid2_channels))) if self.down: self.conv2 = conv1x1( in_channels=mid2_channels, out_channels=out_channels) self.se = SEBlock( 
channels=out_channels, reduction=se_reduction, use_conv=False) def __call__(self, x): if not self.down: identity = x x = self.conv1(x) x = self.branches(x) x = self.norm_activ(x) if self.down: x = self.conv2(x) x = self.se(x) if not self.down: x += identity return x class CGUnit(Chain): """ CGNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. dilate : int Dilation value. se_reduction : int SE-block reduction value. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, layers, dilate, se_reduction, bn_eps, **kwargs): super(CGUnit, self).__init__(**kwargs) mid_channels = out_channels // 2 with self.init_scope(): self.down = CGBlock( in_channels=in_channels, out_channels=mid_channels, dilate=dilate, se_reduction=se_reduction, down=True, bn_eps=bn_eps) self.blocks = SimpleSequential() with self.blocks.init_scope(): for i in range(layers - 1): setattr(self.blocks, "block{}".format(i + 1), CGBlock( in_channels=mid_channels, out_channels=mid_channels, dilate=dilate, se_reduction=se_reduction, down=False, bn_eps=bn_eps)) def __call__(self, x): x = self.down(x) y = self.blocks(x) x = F.concat((y, x), axis=1) # NB: This differs from the original implementation. return x class CGStage(Chain): """ CGNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. layers : int Number of layers in the unit. dilate : int Dilation for blocks. se_reduction : int SE-block reduction value for blocks. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, layers, dilate, se_reduction, bn_eps, **kwargs): super(CGStage, self).__init__(**kwargs) self.use_x = (x_channels > 0) self.use_unit = (layers > 0) with self.init_scope(): if self.use_x: self.x_down = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) if self.use_unit: self.unit = CGUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), layers=layers, dilate=dilate, se_reduction=se_reduction, bn_eps=bn_eps) self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(y_out_channels))) def __call__(self, y, x=None): if self.use_unit: y = self.unit(y) if self.use_x: x = self.x_down(x) y = F.concat((y, x), axis=1) y = self.norm_activ(y) return y, x class CGInitBlock(Chain): """ CGNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps, **kwargs): super(CGInitBlock, self).__init__(**kwargs) with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: L.PReLU(out_channels))) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class CGNet(Chain): """ CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit (for y-branch). 
init_block_channels : int Number of output channels for the initial unit. dilates : list of int Dilations for each unit. se_reductions : list of int SE-block reduction value for each unit. cut_x : list of int Whether to concatenate with x-branch for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. """ def __init__(self, layers, channels, init_block_channels, dilates, se_reductions, cut_x, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, **kwargs): super(CGNet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size with self.init_scope(): self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) with self.features.init_scope(): setattr(self.features, "init_block", CGInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) y_in_channels = init_block_channels for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)): setattr(self.features, "stage{}".format(i + 1), CGStage( x_channels=in_channels if cut_x[i] == 1 else 0, y_in_channels=y_in_channels, y_out_channels=y_out_channels, layers=layers_i, dilate=dilates[i], se_reduction=se_reductions[i], bn_eps=bn_eps)) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=classes) self.up = InterpolationBlock( scale_factor=8, align_corners=False) def __call__(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] y = 
self.features(x, x) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_cgnet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create CGNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels = 32 layers = [0, 3, 21] channels = [35, 131, 256] dilates = [0, 2, 4] se_reductions = [0, 8, 16] cut_x = [1, 1, 0] bn_eps = 1e-3 net = CGNet( layers=layers, channels=channels, init_block_channels=init_block_channels, dilates=dilates, se_reductions=se_reductions, cut_x=cut_x, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def cgnet_cityscapes(classes=19, **kwargs): """ CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_cgnet(classes=classes, model_name="cgnet_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ cgnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != cgnet_cityscapes or weight_count == 496306) batch = 4 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert (y.shape == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
14,152
29.767391
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/wrn1bit_cifar.py
""" WRN-1bit for CIFAR/SVHN, implemented in Chainer. Original paper: 'Training wide residual networks for deployment using a single bit for each weight,' https://arxiv.org/abs/1802.08530. """ __all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn', 'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn'] import os import math import chainer from chainer import backend import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential class Binarize(chainer.function.Function): """ Fake sign op for 1-bit weights. """ def forward(self, inputs): x, = inputs xp = backend.get_array_module(x) return math.sqrt(2.0 / (x.shape[1] * x.shape[2] * x.shape[3])) * xp.sign(x), def backward(self, inputs, grad_outputs): dy, = grad_outputs return dy, class Convolution2D1bit(L.Convolution2D): """ Standard convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad=1, dilate=1, groups=1, use_bias=False, binarized=False): super(Convolution2D1bit, self).__init__( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=groups, nobias=(not use_bias)) self.binarized = binarized def forward(self, x): W_1bit = Binarize()(self.W) if self.binarized else self.W b_1bit = Binarize()(self.b) if self.b is not None and self.binarized else self.b return F.convolution_2d( x=x, W=W_1bit, b=b_1bit, stride=self.stride, pad=self.pad, dilate=self.dilate, groups=self.groups) def conv1x1_1bit(in_channels, out_channels, stride=1, groups=1, use_bias=False, binarized=False): """ Convolution 1x1 layer with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. """ return Convolution2D1bit( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, groups=groups, use_bias=use_bias, binarized=binarized) def conv3x3_1bit(in_channels, out_channels, stride=1, pad=1, dilate=1, groups=1, use_bias=False, binarized=False): """ Convolution 3x3 layer with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 pad value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. 
""" return Convolution2D1bit( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, binarized=binarized) class ConvBlock1bit(Chain): """ Standard convolution block with Batch normalization and ReLU activation, and binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, bn_affine=True, activate=True, binarized=False): super(ConvBlock1bit, self).__init__() self.activate = activate with self.init_scope(): self.conv = Convolution2D1bit( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, binarized=binarized) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5, use_gamma=bn_affine, use_beta=bn_affine) if self.activate: self.activ = F.relu def __call__(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1_block_1bit(in_channels, out_channels, stride=1, pad=0, groups=1, use_bias=False, bn_affine=True, activate=True, binarized=False): """ 1x1 version of the standard convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 0 pad value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ return ConvBlock1bit( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=pad, groups=groups, use_bias=use_bias, bn_affine=bn_affine, activate=activate, binarized=binarized) class PreConvBlock1bit(Chain): """ Convolution block with Batch normalization and ReLU pre-activation, and binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, bn_affine=True, return_preact=False, activate=True, binarized=False): super(PreConvBlock1bit, self).__init__() self.return_preact = return_preact self.activate = activate with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5, use_gamma=bn_affine, use_beta=bn_affine) if self.activate: self.activ = F.relu self.conv = Convolution2D1bit( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, binarized=binarized) def __call__(self, x): x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_conv3x3_block_1bit(in_channels, out_channels, stride=1, pad=1, dilate=1, bn_affine=True, return_preact=False, activate=True, binarized=False): """ 3x3 version of the pre-activated convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 pad value for convolution layer. dilate : int or tuple/list of 2 int, default 1 dilate value for convolution layer. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ return PreConvBlock1bit( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, bn_affine=bn_affine, return_preact=return_preact, activate=activate, binarized=binarized) class PreResBlock1bit(Chain): """ Simple PreResNet block for residual path in ResNet unit (with binarization). 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, stride, binarized=False): super(PreResBlock1bit, self).__init__() with self.init_scope(): self.conv1 = pre_conv3x3_block_1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_affine=False, return_preact=False, binarized=binarized) self.conv2 = pre_conv3x3_block_1bit( in_channels=out_channels, out_channels=out_channels, bn_affine=False, binarized=binarized) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class PreResUnit1bit(Chain): """ PreResNet unit with residual connection (with binarization). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, stride, binarized=False): super(PreResUnit1bit, self).__init__() self.resize_identity = (stride != 1) with self.init_scope(): self.body = PreResBlock1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, binarized=binarized) if self.resize_identity: self.identity_pool = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) def __call__(self, x): identity = x x = self.body(x) if self.resize_identity: identity = self.identity_pool(identity) channels = identity.shape[1] identity = F.pad(identity, pad_width=((0, 0), (0, channels), (0, 0), (0, 0)), mode="constant", constant_values=0) x = x + identity return x class PreResActivation(Chain): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- in_channels : int Number of input channels. 
bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. """ def __init__(self, in_channels, bn_affine=True): super(PreResActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5, use_gamma=bn_affine, use_beta=bn_affine) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class CIFARWRN1bit(Chain): """ WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. binarized : bool, default True Whether to use binarization. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, binarized=True, in_channels=3, in_size=(32, 32), classes=10): super(CIFARWRN1bit, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_1bit( in_channels=in_channels, out_channels=init_block_channels, binarized=binarized)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), PreResUnit1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, binarized=binarized)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels, bn_affine=False)) self.output = SimpleSequential() with self.output.init_scope(): 
setattr(self.output, "final_conv", conv1x1_block_1bit( in_channels=in_channels, out_channels=classes, activate=False, binarized=binarized)) setattr(self.output, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_wrn1bit_cifar(classes, blocks, width_factor, binarized=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create WRN-1bit model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. width_factor : int Wide scale factor for width of layers. binarized : bool, default True Whether to use binarization. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)] init_block_channels *= width_factor net = CIFARWRN1bit( channels=channels, init_block_channels=init_block_channels, binarized=binarized, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def wrn20_10_1bit_cifar10(classes=10, **kwargs): """ WRN-20-10-1bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_cifar10", **kwargs) def wrn20_10_1bit_cifar100(classes=100, **kwargs): """ WRN-20-10-1bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_cifar100", **kwargs) def wrn20_10_1bit_svhn(classes=10, **kwargs): """ WRN-20-10-1bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_svhn", **kwargs) def wrn20_10_32bit_cifar10(classes=10, **kwargs): """ WRN-20-10-32bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_cifar10", **kwargs) def wrn20_10_32bit_cifar100(classes=100, **kwargs): """ WRN-20-10-32bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_cifar100", **kwargs) def wrn20_10_32bit_svhn(classes=10, **kwargs): """ WRN-20-10-32bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (wrn20_10_1bit_cifar10, 10), (wrn20_10_1bit_cifar100, 100), (wrn20_10_1bit_svhn, 10), (wrn20_10_32bit_cifar10, 10), (wrn20_10_32bit_cifar100, 100), (wrn20_10_32bit_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn20_10_1bit_cifar10 or weight_count == 26737140) assert (model != wrn20_10_1bit_cifar100 or weight_count == 26794920) assert (model != wrn20_10_1bit_svhn or weight_count == 26737140) assert (model != wrn20_10_32bit_cifar10 or weight_count == 26737140) assert (model != wrn20_10_32bit_cifar100 or weight_count == 26794920) assert (model != wrn20_10_32bit_svhn or weight_count == 26737140) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
25,496
31.031407
125
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/condensenet.py
""" CondenseNet for ImageNet-1K, implemented in Chainer. Original paper: 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. """ __all__ = ['CondenseNet', 'condensenet74_c4_g4', 'condensenet74_c8_g8'] import os import numpy as np import chainer.functions as F import chainer.links as L from chainer import initializers from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential, ChannelShuffle class CondenseSimpleConv(Chain): """ CondenseNet specific simple convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, groups): super(CondenseSimpleConv, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization(size=in_channels) self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, groups=groups) def __call__(self, x): x = self.bn(x) x = self.activ(x) x = self.conv(x) return x def condense_simple_conv3x3(in_channels, out_channels, groups): """ 3x3 version of the CondenseNet specific simple convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. """ return CondenseSimpleConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=1, pad=1, groups=groups) class CondenseComplexConv(Chain): """ CondenseNet specific complex convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, groups): super(CondenseComplexConv, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization(size=in_channels) self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, groups=groups) self.c_shuffle = ChannelShuffle( channels=out_channels, groups=groups) self.index = initializers.generate_array( initializer=initializers._get_initializer(0), shape=(in_channels,), xp=self.xp, dtype=np.int32) self.register_persistent("index") def __call__(self, x): x = self.xp.take(x.array, self.index, axis=1) x = self.bn(x) x = self.activ(x) x = self.conv(x) x = self.c_shuffle(x) return x def condense_complex_conv1x1(in_channels, out_channels, groups): """ 1x1 version of the CondenseNet specific complex convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. """ return CondenseComplexConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=1, pad=0, groups=groups) class CondenseUnit(Chain): """ CondenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. 
""" def __init__(self, in_channels, out_channels, groups): super(CondenseUnit, self).__init__() bottleneck_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bottleneck_size with self.init_scope(): self.conv1 = condense_complex_conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=groups) self.conv2 = condense_simple_conv3x3( in_channels=mid_channels, out_channels=inc_channels, groups=groups) def __call__(self, x): identity = x x = self.conv1(x) x = self.conv2(x) x = F.concat((identity, x), axis=1) return x class TransitionBlock(Chain): """ CondenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the first unit of each stage. """ def __init__(self): super(TransitionBlock, self).__init__() with self.init_scope(): self.pool = partial( F.average_pooling_2d, ksize=2, stride=2, pad=0) def __call__(self, x): x = self.pool(x) return x class CondenseInitBlock(Chain): """ CondenseNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(CondenseInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=2, pad=1, nobias=True) def __call__(self, x): x = self.conv(x) return x class PostActivation(Chain): """ CondenseNet final block, which performs the same function of postactivation as in PreResNet. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PostActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization(size=in_channels) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class CondenseLinear(Chain): """ CondenseNet specific dense block. Parameters: ---------- units : int Number of output channels. 
in_units : int Number of input channels. drop_rate : float Fraction of input channels for drop. """ def __init__(self, units, in_units, drop_rate=0.5): super(CondenseLinear, self).__init__() drop_in_units = int(in_units * drop_rate) with self.init_scope(): self.dense = L.Linear( in_size=drop_in_units, out_size=units) self.index = initializers.generate_array( initializer=initializers._get_initializer(0), shape=(drop_in_units,), xp=self.xp, dtype=np.int32) self.register_persistent("index") def __call__(self, x): x = self.xp.take(x.array, self.index, axis=1) x = self.dense(x) return x class CondenseNet(Chain): """ CondenseNet model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), classes=1000): super(CondenseNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", CondenseInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock()) for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), CondenseUnit( in_channels=in_channels, out_channels=out_channels, groups=groups)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'post_activ', PostActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", CondenseLinear( units=classes, in_units=in_channels)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_condensenet(num_layers, groups=4, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create CondenseNet (converted) model with specific parameters. Parameters: ---------- num_layers : int Number of layers. groups : int Number of groups in convolution layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if num_layers == 74: init_block_channels = 16 layers = [4, 6, 8, 10, 8] growth_rates = [8, 16, 32, 64, 128] else: raise ValueError("Unsupported CondenseNet version with number of layers {}".format(num_layers)) from functools import reduce channels = reduce(lambda xi, yi: xi + [reduce(lambda xj, yj: xj + [xj[-1] + yj], [yi[1]] * yi[0], [xi[-1][-1]])[1:]], zip(layers, growth_rates), [[init_block_channels]])[1:] net = CondenseNet( channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def condensenet74_c4_g4(**kwargs): """ CondenseNet-74 (C=G=4) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_condensenet(num_layers=74, groups=4, model_name="condensenet74_c4_g4", **kwargs) def condensenet74_c8_g8(**kwargs): """ CondenseNet-74 (C=G=8) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_condensenet(num_layers=74, groups=8, model_name="condensenet74_c8_g8", **kwargs) def _test(): import chainer chainer.global_config.train = False pretrained = True models = [ condensenet74_c4_g4, condensenet74_c8_g8, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != condensenet74_c4_g4 or weight_count == 4773944) assert (model != condensenet74_c8_g8 or weight_count == 2935416) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
15,017
28.679842
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/fbnet.py
""" FBNet for ImageNet-1K, implemented in Chainer. Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. """ __all__ = ['FBNet', 'fbnet_cb'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SimpleSequential class FBNetUnit(Chain): """ FBNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. bn_eps : float Small float added to variance in Batch norm. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int Expansion factor for each unit. activation : str, default 'relu' Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, bn_eps, use_kernel3, exp_factor, activation="relu"): super(FBNetUnit, self).__init__() assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (stride == 1) self.use_exp_conv = True mid_channels = exp_factor * in_channels with self.init_scope(): if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation) if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bn_eps=bn_eps, activation=activation) else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bn_eps=bn_eps, activation=activation) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def __call__(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) x = self.conv2(x) if 
self.residual: x = x + identity return x class FBNetInitBlock(Chain): """ FBNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(FBNetInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps) self.conv2 = FBNetUnit( in_channels=out_channels, out_channels=out_channels, stride=1, bn_eps=bn_eps, use_kernel3=True, exp_factor=1) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class FBNet(Chain): """ FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, bn_eps=1e-5, in_channels=3, in_size=(224, 224), classes=1000): super(FBNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", FBNetInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] setattr(stage, "unit{}".format(j + 1), FBNetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, use_kernel3=use_kernel3, exp_factor=exp_factor)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_fbnet(version, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create FBNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('a', 'b' or 'c'). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "c": init_block_channels = 16 final_block_channels = 1984 channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]] kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]] exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]] else: raise ValueError("Unsupported FBNet version {}".format(version)) net = FBNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def fbnet_cb(**kwargs): """ FBNet-Cb model (bn_eps=1e-3) from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_fbnet(version="c", bn_eps=1e-3, model_name="fbnet_cb", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ fbnet_cb, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != fbnet_cb or weight_count == 5572200) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
10,419
32.290735
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/mobilenetv3.py
""" MobileNetV3 for ImageNet-1K, implemented in Chainer. Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ __all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4', 'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2', 'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\ HSwish, SimpleSequential class MobileNetV3Unit(Chain): """ MobileNetV3 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. stride : int or tuple/list of 2 int Stride of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. activation : str Activation function or name of activation function. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, exp_channels, stride, use_kernel3, activation, use_se): super(MobileNetV3Unit, self).__init__() assert (exp_channels >= out_channels) self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = use_se self.use_exp_conv = exp_channels != out_channels mid_channels = exp_channels with self.init_scope(): if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, round_mid=True, out_activation="hsigmoid") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class MobileNetV3FinalBlock(Chain): """ MobileNetV3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, use_se): super(MobileNetV3FinalBlock, self).__init__() self.use_se = use_se with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation="hswish") if self.use_se: self.se = SEBlock( channels=out_channels, reduction=4, round_mid=True, out_activation="hsigmoid") def __call__(self, x): x = self.conv(x) if self.use_se: x = self.se(x) return x class MobileNetV3Classifier(Chain): """ MobileNetV3 classifier. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. mid_channels : int Number of middle channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, mid_channels, dropout_rate): super(MobileNetV3Classifier, self).__init__() self.use_dropout = (dropout_rate != 0.0) with self.init_scope(): self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.activ = HSwish() if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) def __call__(self, x): x = self.conv1(x) x = self.activ(x) if self.use_dropout: x = self.dropout(x) x = self.conv2(x) return x class MobileNetV3(Chain): """ MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- channels : list of list of int Number of output channels for each unit. exp_channels : list of list of int Number of middle (expanded) channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. use_relu : list of list of int/bool Using ReLU activation flag for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. final_use_se : bool Whether to use SE-module in the final block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), classes=1000): super(MobileNetV3, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation="hswish")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): exp_channels_ij = exp_channels[i][j] stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 activation = "relu" if use_relu[i][j] == 1 else "hswish" use_se_flag = use_se[i][j] == 1 setattr(stage, "unit{}".format(j + 1), MobileNetV3Unit( in_channels=in_channels, out_channels=out_channels, exp_channels=exp_channels_ij, use_kernel3=use_kernel3, stride=stride, activation=activation, use_se=use_se_flag)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", MobileNetV3FinalBlock( in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", MobileNetV3Classifier( in_channels=in_channels, out_channels=classes, mid_channels=classifier_mid_channels, dropout_rate=0.2)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mobilenetv3(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", 
".chainer", "models"), **kwargs): """ Create MobileNetV3 model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('small' or 'large'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "small": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]] exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]] first_stride = True final_block_channels = 576 elif version == "large": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]] kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]] first_stride = False final_block_channels = 960 else: raise ValueError("Unsupported MobileNetV3 version {}".format(version)) final_use_se = False classifier_mid_channels = 1280 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels] init_block_channels = round_channels(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale) net = MobileNetV3( channels=channels, exp_channels=exp_channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, 
classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, use_relu=use_relu, use_se=use_se, first_stride=first_stride, final_use_se=final_use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mobilenetv3_small_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_small_wd2(**kwargs): """ MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs) def mobilenetv3_small_w3d4(**kwargs): """ MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs) def mobilenetv3_small_w1(**kwargs): """ MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs) def mobilenetv3_small_w5d4(**kwargs): """ MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs) def mobilenetv3_large_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_large_wd2(**kwargs): """ MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs) def mobilenetv3_large_w3d4(**kwargs): """ MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs) def mobilenetv3_large_w1(**kwargs): """ MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs) def mobilenetv3_large_w5d4(**kwargs): """ MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mobilenetv3_small_w7d20, mobilenetv3_small_wd2, mobilenetv3_small_w3d4, mobilenetv3_small_w1, mobilenetv3_small_w5d4, mobilenetv3_large_w7d20, mobilenetv3_large_wd2, mobilenetv3_large_w3d4, mobilenetv3_large_w1, mobilenetv3_large_w5d4, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv3_small_w7d20 or weight_count == 2159600) assert (model != mobilenetv3_small_wd2 or weight_count == 2288976) assert (model != mobilenetv3_small_w3d4 or weight_count == 2581312) assert (model != mobilenetv3_small_w1 or weight_count == 2945288) assert (model != mobilenetv3_small_w5d4 or weight_count == 3643632) assert (model != mobilenetv3_large_w7d20 or weight_count == 2943080) assert (model != mobilenetv3_large_wd2 or weight_count == 3334896) assert (model != mobilenetv3_large_w3d4 or 
weight_count == 4263496) assert (model != mobilenetv3_large_w1 or weight_count == 5481752) assert (model != mobilenetv3_large_w5d4 or weight_count == 7459144) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
19,635
34.508137
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/diaresnet.py
""" DIA-ResNet for ImageNet-1K, implemented in Chainer. Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['DIAResNet', 'diaresnet10', 'diaresnet12', 'diaresnet14', 'diaresnetbc14b', 'diaresnet16', 'diaresnet18', 'diaresnet26', 'diaresnetbc26b', 'diaresnet34', 'diaresnetbc38b', 'diaresnet50', 'diaresnet50b', 'diaresnet101', 'diaresnet101b', 'diaresnet152', 'diaresnet152b', 'diaresnet200', 'diaresnet200b', 'DIAAttention', 'DIAResUnit'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, DualPathSequential, SimpleSequential from .resnet import ResBlock, ResBottleneck, ResInitBlock class FirstLSTMAmp(Chain): """ First LSTM amplifier branch. Parameters: ---------- in_size : int Number of input channels. out_size : int Number of output channels. """ def __init__(self, in_size, out_size): super(FirstLSTMAmp, self).__init__() mid_size = in_size // 4 with self.init_scope(): self.fc1 = L.Linear( in_size=in_size, out_size=mid_size) self.activ = F.relu self.fc2 = L.Linear( in_size=mid_size, out_size=out_size) def __call__(self, x): x = self.fc1(x) x = self.activ(x) x = self.fc2(x) return x class DIALSTMCell(Chain): """ DIA-LSTM cell. Parameters: ---------- in_x_features : int Number of x input channels. in_h_features : int Number of h input channels. num_layers : int Number of amplifiers. dropout_rate : float, default 0.1 Parameter of Dropout layer. Faction of the input units to drop. 
""" def __init__(self, in_x_features, in_h_features, num_layers, dropout_rate=0.1): super(DIALSTMCell, self).__init__() out_features = 4 * in_h_features with self.init_scope(): self.x_amps = SimpleSequential() with self.x_amps.init_scope(): for i in range(num_layers): amp_class = FirstLSTMAmp if i == 0 else L.Linear setattr(self.x_amps, "amp{}".format(i + 1), amp_class( in_size=in_x_features, out_size=out_features)) in_x_features = in_h_features self.h_amps = SimpleSequential() with self.h_amps.init_scope(): for i in range(num_layers): amp_class = FirstLSTMAmp if i == 0 else L.Linear setattr(self.h_amps, "amp{}".format(i + 1), amp_class( in_size=in_h_features, out_size=out_features)) self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x, h, c): hy = [] cy = [] for i, name in enumerate(self.x_amps.layer_names): hx_i = h[i] cx_i = c[i] gates = self.x_amps[name](x) + self.h_amps[name](hx_i) i_gate, f_gate, c_gate, o_gate = F.split_axis(gates, indices_or_sections=4, axis=1) i_gate = F.sigmoid(i_gate) f_gate = F.sigmoid(f_gate) c_gate = F.tanh(c_gate) o_gate = F.sigmoid(o_gate) cy_i = (f_gate * cx_i) + (i_gate * c_gate) hy_i = o_gate * F.sigmoid(cy_i) cy.append(cy_i) hy.append(hy_i) x = self.dropout(hy_i) return hy, cy class DIAAttention(Chain): """ DIA-Net attention module. Parameters: ---------- in_x_features : int Number of x input channels. in_h_features : int Number of h input channels. num_layers : int, default 1 Number of amplifiers. 
""" def __init__(self, in_x_features, in_h_features, num_layers=1): super(DIAAttention, self).__init__() self.num_layers = num_layers with self.init_scope(): self.lstm = DIALSTMCell( in_x_features=in_x_features, in_h_features=in_h_features, num_layers=num_layers) def __call__(self, x, hc=None): w = F.average_pooling_2d(x, ksize=x.shape[2:]) w = w.reshape((w.shape[0], -1)) if hc is None: h = [self.xp.zeros_like(w.array, dtype=w.dtype)] * self.num_layers c = [self.xp.zeros_like(w.array, dtype=w.dtype)] * self.num_layers else: h, c = hc h, c = self.lstm(w, h, c) w = F.expand_dims(F.expand_dims(h[-1], axis=-1), axis=-1) x = x * w return x, (h, c) class DIAResUnit(Chain): """ DIA-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilate : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. attention : nn.Module, default None Attention module. hold_attention : bool, default False Whether hold attention module. 
""" def __init__(self, in_channels, out_channels, stride, pad=1, dilate=1, bottleneck=True, conv1_stride=False, attention=None, hold_attention=True): super(DIAResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=pad, dilate=dilate, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu if hold_attention: self.attention = attention if not hold_attention: self.attention = attention def __call__(self, x, hc=None): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x, hc = self.attention(x, hc) x = x + identity x = self.activ(x) return x, hc class DIAResNet(Chain): """ DIA-ResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(DIAResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DIAResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, attention=attention, hold_attention=(j == 0))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_diaresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DIA-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported DIA-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = DIAResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file 
load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def diaresnet10(**kwargs): """ DIA-ResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=10, model_name="diaresnet10", **kwargs) def diaresnet12(**kwargs): """ DIA-ResNet-12 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=12, model_name="diaresnet12", **kwargs) def diaresnet14(**kwargs): """ DIA-ResNet-14 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=14, model_name="diaresnet14", **kwargs) def diaresnetbc14b(**kwargs): """ DIA-ResNet-BC-14b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diaresnetbc14b", **kwargs) def diaresnet16(**kwargs): """ DIA-ResNet-16 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=16, model_name="diaresnet16", **kwargs) def diaresnet18(**kwargs): """ DIA-ResNet-18 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=18, model_name="diaresnet18", **kwargs) def diaresnet26(**kwargs): """ DIA-ResNet-26 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=26, bottleneck=False, model_name="diaresnet26", **kwargs) def diaresnetbc26b(**kwargs): """ DIA-ResNet-BC-26b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diaresnetbc26b", **kwargs) def diaresnet34(**kwargs): """ DIA-ResNet-34 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=34, model_name="diaresnet34", **kwargs) def diaresnetbc38b(**kwargs): """ DIA-ResNet-BC-38b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diaresnetbc38b", **kwargs) def diaresnet50(**kwargs): """ DIA-ResNet-50 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=50, model_name="diaresnet50", **kwargs) def diaresnet50b(**kwargs): """ DIA-ResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=50, conv1_stride=False, model_name="diaresnet50b", **kwargs) def diaresnet101(**kwargs): """ DIA-ResNet-101 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=101, model_name="diaresnet101", **kwargs) def diaresnet101b(**kwargs): """ DIA-ResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=101, conv1_stride=False, model_name="diaresnet101b", **kwargs) def diaresnet152(**kwargs): """ DIA-ResNet-152 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=152, model_name="diaresnet152", **kwargs) def diaresnet152b(**kwargs): """ DIA-ResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=152, conv1_stride=False, model_name="diaresnet152b", **kwargs) def diaresnet200(**kwargs): """ DIA-ResNet-200 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=200, model_name="diaresnet200", **kwargs) def diaresnet200b(**kwargs): """ DIA-ResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=200, conv1_stride=False, model_name="diaresnet200b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ diaresnet10, diaresnet12, diaresnet14, diaresnetbc14b, diaresnet16, diaresnet18, diaresnet26, diaresnetbc26b, diaresnet34, diaresnetbc38b, diaresnet50, diaresnet50b, diaresnet101, diaresnet101b, diaresnet152, diaresnet152b, diaresnet200, diaresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.to_gpu() weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != diaresnet10 or weight_count == 6297352) assert (model != diaresnet12 or weight_count == 6371336) assert (model != diaresnet14 or weight_count == 6666760) assert (model != diaresnetbc14b or weight_count == 24023976) assert (model != diaresnet16 or weight_count == 7847432) assert (model != diaresnet18 or weight_count == 12568072) assert (model != diaresnet26 or weight_count == 18838792) assert (model != diaresnetbc26b or weight_count == 29954216) assert (model != diaresnet34 or weight_count == 22676232) assert (model != diaresnetbc38b or weight_count == 35884456) assert (model != diaresnet50 or weight_count == 39516072) assert (model != diaresnet50b or weight_count == 39516072) assert (model != diaresnet101 or weight_count == 58508200) assert (model != diaresnet101b or weight_count == 58508200) assert (model != diaresnet152 or weight_count == 74151848) assert (model != diaresnet152b or 
weight_count == 74151848) assert (model != diaresnet200 or weight_count == 78632872) assert (model != diaresnet200b or weight_count == 78632872) x = np.zeros((1, 3, 224, 224), np.float32) # import cupy # x = cupy.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
25,200
33.055405
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/lffd.py
""" LFFD for face detection, implemented in Chainer. Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. """ __all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface'] import os import chainer.functions as F from chainer import Chain from chainer.serializers import load_npz from .common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent from .resnet import ResUnit from .preresnet import PreResUnit class LffdDetectionBranch(Chain): """ LFFD specific detection branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, use_bias, use_bn, **kwargs): super(LffdDetectionBranch, self).__init__(**kwargs) with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=in_channels, use_bias=use_bias, use_bn=use_bn) self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class LffdDetectionBlock(Chain): """ LFFD specific detection block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. use_bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. 
""" def __init__(self, in_channels, mid_channels, use_bias, use_bn, **kwargs): super(LffdDetectionBlock, self).__init__(**kwargs) with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn) self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "bbox_branch", LffdDetectionBranch( in_channels=mid_channels, out_channels=4, use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "score_branch", LffdDetectionBranch( in_channels=mid_channels, out_channels=2, use_bias=use_bias, use_bn=use_bn)) def __call__(self, x): x = self.conv(x) x = self.branches(x) return x class LFFD(Chain): """ LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- enc_channels : list of int Number of output channels for each encoder stage. dec_channels : int Number of output channels for each decoder stage. init_block_channels : int Number of output channels for the initial encoder unit. layers : list of int Number of units in each encoder stage. int_bends : list of int Number of internal bends for each encoder stage. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (640, 640) Spatial size of the expected input image. 
""" def __init__(self, enc_channels, dec_channels, init_block_channels, layers, int_bends, use_preresnet, in_channels=3, in_size=(640, 640), **kwargs): super(LFFD, self).__init__(**kwargs) self.in_size = in_size unit_class = PreResUnit if use_preresnet else ResUnit use_bias = True use_bn = False with self.init_scope(): self.encoder = MultiOutputSequential(return_last=False) with self.encoder.init_scope(): setattr(self.encoder, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, pad=0, use_bias=use_bias, use_bn=use_bn)) in_channels = init_block_channels for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] stage = MultiOutputSequential(multi_output=False, dual_output=True) with stage.init_scope(): setattr(stage, "trans{}".format(i + 1), conv3x3( in_channels=in_channels, out_channels=channels_per_stage, stride=2, pad=0, use_bias=use_bias)) for j in range(layers_per_stage): unit = unit_class( in_channels=channels_per_stage, out_channels=channels_per_stage, stride=1, use_bias=use_bias, use_bn=use_bn, bottleneck=False) if layers_per_stage - j <= int_bends_per_stage: unit.do_output = True setattr(stage, "unit{}".format(j + 1), unit) final_activ = F.relu final_activ.do_output = True setattr(stage, "final_activ", final_activ) stage.do_output2 = True in_channels = channels_per_stage setattr(self.encoder, "stage{}".format(i + 1), stage) self.decoder = ParallelConcurent() with self.decoder.init_scope(): k = 0 for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] for j in range(layers_per_stage): if layers_per_stage - j <= int_bends_per_stage: setattr(self.decoder, "unit{}".format(k + 1), LffdDetectionBlock( in_channels=channels_per_stage, mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn)) k += 1 setattr(self.decoder, "unit{}".format(k + 1), LffdDetectionBlock( in_channels=channels_per_stage, 
mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn)) k += 1 def __call__(self, x): x = self.encoder(x) x = self.decoder(x) return x def get_lffd(blocks, use_preresnet, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create LFFD model with specific parameters. Parameters: ---------- blocks : int Number of blocks. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 20: layers = [3, 1, 1, 1, 1] enc_channels = [64, 64, 64, 128, 128] int_bends = [0, 0, 0, 0, 0] elif blocks == 25: layers = [4, 2, 1, 3] enc_channels = [64, 64, 128, 128] int_bends = [1, 1, 0, 2] else: raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks)) dec_channels = 128 init_block_channels = 64 net = LFFD( enc_channels=enc_channels, dec_channels=dec_channels, init_block_channels=init_block_channels, layers=layers, int_bends=int_bends, use_preresnet=use_preresnet, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def lffd20x5s320v2_widerface(**kwargs): """ LFFD-320-20L-5S-V2 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs) def lffd25x8s560v1_widerface(**kwargs): """ LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (640, 640) pretrained = False models = [ (lffd20x5s320v2_widerface, 5), (lffd25x8s560v1_widerface, 8), ] for model, num_outs in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != lffd20x5s320v2_widerface or weight_count == 1520606) assert (model != lffd25x8s560v1_widerface or weight_count == 2290608) batch = 14 x = np.zeros((batch, 3, in_size[0], in_size[1]), np.float32) y = net(x) assert (len(y) == num_outs) if __name__ == "__main__": _test()
11,287
33.31003
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sepreresnet.py
""" SE-PreResNet for ImageNet-1K, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18', 'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b', 'SEPreResUnit'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, SEBlock, SimpleSequential from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation class SEPreResUnit(Chain): """ SE-PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride): super(SEPreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def __call__(self, x): identity = x x, x_pre_activ = self.body(x) x = self.se(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class SEPreResNet(Chain): """ SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(SEPreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SEPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_sepreresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SE-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sepreresnet10(**kwargs): """ SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs) def sepreresnet12(**kwargs): """ SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs) def sepreresnet14(**kwargs): """ SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs) def sepreresnet16(**kwargs): """ SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs) def sepreresnet18(**kwargs): """ SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs) def sepreresnet26(**kwargs): """ SE-PreResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs) def sepreresnetbc26b(**kwargs): """ SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs) def sepreresnet34(**kwargs): """ SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs) def sepreresnetbc38b(**kwargs): """ SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs) def sepreresnet50(**kwargs): """ SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs) def sepreresnet50b(**kwargs): """ SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs) def sepreresnet101(**kwargs): """ SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs) def sepreresnet101b(**kwargs): """ SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs) def sepreresnet152(**kwargs): """ SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs) def sepreresnet152b(**kwargs): """ SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs) def sepreresnet200(**kwargs): """ SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs) def sepreresnet200b(**kwargs): """ SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ sepreresnet10, sepreresnet12, sepreresnet14, sepreresnet16, sepreresnet18, sepreresnet26, sepreresnetbc26b, sepreresnet34, sepreresnetbc38b, sepreresnet50, sepreresnet50b, sepreresnet101, sepreresnet101b, sepreresnet152, sepreresnet152b, sepreresnet200, sepreresnet200b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet10 or weight_count == 5461668) assert (model != sepreresnet12 or weight_count == 5536232) assert (model != sepreresnet14 or weight_count == 5833840) assert (model != sepreresnet16 or weight_count == 7022976) assert (model != sepreresnet18 or weight_count == 11776928) assert (model != sepreresnet26 or weight_count == 18092188) assert (model != sepreresnetbc26b or weight_count == 17388424) assert (model != sepreresnet34 or weight_count == 21957204) assert (model != sepreresnetbc38b or weight_count == 24019064) assert (model != sepreresnet50 or weight_count == 28080472) assert (model != sepreresnet50b or weight_count == 28080472) assert (model != sepreresnet101 or weight_count == 49319320) assert (model != sepreresnet101b or weight_count == 49319320) assert (model != sepreresnet152 or weight_count == 66814296) assert (model != sepreresnet152b or weight_count == 66814296) assert (model != sepreresnet200 or weight_count == 71828312) assert (model != sepreresnet200b or weight_count == 71828312) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
18,746
33.209854
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnext.py
""" ResNeXt for ImageNet-1K, implemented in Chainer. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d', 'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'ResNeXtBottleneck', 'ResNeXtUnit'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SimpleSequential from .resnet import ResInitBlock class ResNeXtBottleneck(Chain): """ ResNeXt bottleneck block for residual path in ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, bottleneck_factor=4): super(ResNeXtBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width) self.conv2 = conv3x3_block( in_channels=group_width, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResNeXtUnit(Chain): """ ResNeXt unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
stride : int or tuple/list of 2 int Stride of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(ResNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = ResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNeXt(Chain): """ ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): super(ResNeXt, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 14: layers = [1, 1, 1, 1] elif blocks == 26: layers = [2, 2, 2, 2] elif blocks == 38: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 2 == blocks) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resnext14_16x4d(**kwargs): """ ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs) def resnext14_32x2d(**kwargs): """ ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs) def resnext14_32x4d(**kwargs): """ ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs) def resnext26_16x4d(**kwargs): """ ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs) def resnext26_32x2d(**kwargs): """ ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs) def resnext26_32x4d(**kwargs): """ ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs) def resnext38_32x4d(**kwargs): """ ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs) def resnext50_32x4d(**kwargs): """ ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs) def resnext101_32x4d(**kwargs): """ ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs) def resnext101_64x4d(**kwargs): """ ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ resnext14_16x4d, resnext14_32x2d, resnext14_32x4d, resnext26_16x4d, resnext26_32x2d, resnext26_32x4d, resnext38_32x4d, resnext50_32x4d, resnext101_32x4d, resnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext14_16x4d or weight_count == 7127336) assert (model != resnext14_32x2d or weight_count == 7029416) assert (model != resnext14_32x4d or weight_count == 9411880) assert (model != resnext26_16x4d or weight_count == 10119976) assert (model != resnext26_32x2d or weight_count == 9924136) assert (model != resnext26_32x4d or weight_count == 15389480) assert (model != resnext38_32x4d or weight_count == 21367080) assert (model != resnext50_32x4d or weight_count == 25028904) assert (model != resnext101_32x4d or weight_count == 44177704) assert (model != resnext101_64x4d or weight_count == 83455272) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
15,179
32.144105
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/jasper.py
""" Jasper/DR for ASR, implemented in Chainer. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader', 'NemoMelSpecExtractor', 'CtcDecoder'] import os import numpy as np import chainer.functions as F import chainer.links as L from chainer import Chain from chainer import initializers from chainer.initializers.constant import Constant from functools import partial from chainer.serializers import load_npz from .common import DualPathSequential, DualPathParallelConcurent class NemoAudioReader(object): """ Audio Reader from NVIDIA NEMO toolkit. Parameters: ---------- desired_audio_sample_rate : int, default 16000 Desired audio sample rate. trunc_value : int or None, default None Value to truncate. """ def __init__(self, desired_audio_sample_rate=16000): super(NemoAudioReader, self).__init__() self.desired_audio_sample_rate = desired_audio_sample_rate def read_from_file(self, audio_file_path): """ Read audio from file. Parameters: ---------- audio_file_path : str Path to audio file. Returns: ------- np.array Audio data. """ from soundfile import SoundFile with SoundFile(audio_file_path, "r") as data: sample_rate = data.samplerate audio_data = data.read(dtype="float32") audio_data = audio_data.transpose() if sample_rate != self.desired_audio_sample_rate: from librosa.core import resample as lr_resample audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate) if audio_data.ndim >= 2: audio_data = np.mean(audio_data, axis=1) return audio_data def read_from_files(self, audio_file_paths): """ Read audios from files. Parameters: ---------- audio_file_paths : list of str Paths to audio files. Returns: ------- list of np.array Audio data. 
""" assert (type(audio_file_paths) in (list, tuple)) audio_data_list = [] for audio_file_path in audio_file_paths: audio_data = self.read_from_file(audio_file_path) audio_data_list.append(audio_data) return audio_data_list class NemoMelSpecExtractor(Chain): """ Mel-Spectrogram Extractor from NVIDIA NEMO toolkit. Parameters: ---------- sample_rate : int, default 16000 Sample rate of the input audio data. window_size_sec : float, default 0.02 Size of window for FFT in seconds. window_stride_sec : float, default 0.01 Stride of window for FFT in seconds. n_fft : int, default 512 Length of FT window. n_filters : int, default 64 Number of Mel spectrogram freq bins. preemph : float, default 0.97 Amount of pre emphasis to add to audio. dither : float, default 1.0e-05 Amount of white-noise dithering. """ def __init__(self, sample_rate=16000, window_size_sec=0.02, window_stride_sec=0.01, n_fft=512, n_filters=64, preemph=0.97, dither=1.0e-05): super(NemoMelSpecExtractor, self).__init__() self.log_zero_guard_value = 2 ** -24 win_length = int(window_size_sec * sample_rate) self.hop_length = int(window_stride_sec * sample_rate) self.n_filters = n_filters from scipy import signal as scipy_signal from librosa import stft as librosa_stft window_arr = scipy_signal.hann(win_length, sym=True) self.stft = lambda x: librosa_stft( x, n_fft=n_fft, hop_length=self.hop_length, win_length=win_length, window=window_arr, center=True) self.dither = dither self.preemph = preemph self.pad_align = 16 from librosa.filters import mel as librosa_mel self.fb_arr = librosa_mel( sample_rate, n_fft, n_mels=n_filters, fmin=0, fmax=(sample_rate / 2)) with self.init_scope(): self.window = initializers.generate_array( initializer=Constant(0, dtype="float32"), shape=window_arr.shape, xp=self.xp, dtype=np.float32) self.register_persistent("window") self.fb = initializers.generate_array( initializer=Constant(0, dtype="float32"), shape=np.expand_dims(self.fb_arr, axis=0).shape, xp=self.xp, dtype="float32") 
self.register_persistent("fb") def __call__(self, xs): x_eps = 1e-5 batch = len(xs) x_len = np.zeros((batch,), dtype=np.long) ys = [] for i, xi in enumerate(xs): x_len[i] = np.ceil(float(len(xi)) / self.hop_length).astype(np.long) if self.dither > 0: xi += self.dither * np.random.randn(*xi.shape) xi = np.concatenate((xi[:1], xi[1:] - self.preemph * xi[:-1]), axis=0) yi = self.stft(xi) yi = np.abs(yi) yi = np.square(yi) yi = np.matmul(self.fb_arr, yi) yi = np.log(yi + self.log_zero_guard_value) assert (yi.shape[1] != 1) yi_mean = yi.mean(axis=1) yi_std = yi.std(axis=1) yi_std += x_eps yi = (yi - np.expand_dims(yi_mean, axis=-1)) / np.expand_dims(yi_std, axis=-1) ys.append(yi) channels = ys[0].shape[0] x_len_max = max([yj.shape[-1] for yj in ys]) x = np.zeros((batch, channels, x_len_max), dtype=np.float32) for i, yi in enumerate(ys): x_len_i = x_len[i] x[i, :, :x_len_i] = yi[:, :x_len_i] pad_rem = x_len_max % self.pad_align if pad_rem != 0: x = np.pad(x, ((0, 0), (0, 0), (0, self.pad_align - pad_rem))) return x, x_len def calc_flops(self, x): assert (x.shape[0] == 1) num_flops = x[0].size num_macs = 0 return num_flops, num_macs class CtcDecoder(object): """ CTC decoder (to decode a sequence of labels to words). Parameters: ---------- vocabulary : list of str Vocabulary of the dataset. """ def __init__(self, vocabulary): super().__init__() self.blank_id = len(vocabulary) self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))]) def __call__(self, predictions): """ Decode a sequence of labels to words. Parameters: ---------- predictions : np.array of int or list of list of int Tensor with predicted labels. Returns: ------- list of str Words. 
""" hypotheses = [] for prediction in predictions: decoded_prediction = [] previous = self.blank_id for p in prediction: if (p != previous or previous == self.blank_id) and p != self.blank_id: decoded_prediction.append(p) previous = p hypothesis = "".join([self.labels_map[c] for c in decoded_prediction]) hypotheses.append(hypothesis) return hypotheses def conv1d1(in_channels, out_channels, stride=1, groups=1, use_bias=False, **kwargs): """ 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. """ return L.Convolution1D( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, nobias=(not use_bias), groups=groups, **kwargs) class MaskConv1d(L.Convolution1D): """ Masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 1 int Convolution window size. stride : int or tuple/list of 1 int Stride of the convolution. pad : int or tuple/list of 1 int, default 0 Padding value for convolution layer. dilate : int or tuple/list of 1 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_mask : bool, default True Whether to use mask. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad=0, dilate=1, groups=1, use_bias=False, use_mask=True, **kwargs): super(MaskConv1d, self).__init__( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups, **kwargs) self.use_mask = use_mask if self.use_mask: self.stride0 = stride[0] if isinstance(stride, (list, tuple)) else stride self.pad0 = pad[0] if isinstance(pad, (list, tuple)) else pad def __call__(self, x, x_len): if self.use_mask: mask = F.broadcast_to(self.xp.arange(x.shape[2]), x.shape).array <\ F.expand_dims(F.expand_dims(x_len, -1), -1).array x *= mask x_len = (x_len + 2 * self.pad0 - self.dilate[0] * (self.ksize[0] - 1) - 1) // self.stride0 + 1 x = F.convolution_1d( x=x, W=self.W, b=self.b, stride=self.stride, pad=self.pad, dilate=self.dilate, groups=self.groups) return x, x_len def mask_conv1d1(in_channels, out_channels, stride=1, groups=1, use_bias=False, **kwargs): """ Masked 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. """ return MaskConv1d( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, groups=groups, use_bias=use_bias, **kwargs) class MaskConvBlock1d(Chain): """ Masked 1D convolution block with batch normalization, activation, and dropout. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size. stride : int Stride of the convolution. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. 
use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.Activation('relu') Activation function or name of activation function. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu), dropout_rate=0.0, **kwargs): super(MaskConvBlock1d, self).__init__(**kwargs) self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) with self.init_scope(): self.conv = MaskConv1d( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = activation() if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x, x_len): x, x_len = self.conv(x, x_len) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x) return x, x_len def mask_conv1d1_block(in_channels, out_channels, stride=1, pad=0, **kwargs): """ 1-dim kernel version of the masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Stride of the convolution. pad : int, default 0 Padding value for convolution layer. """ return MaskConvBlock1d( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=pad, **kwargs) class ChannelShuffle1d(Chain): """ 1D version of the channel shuffle layer. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. 
""" def __init__(self, channels, groups, **kwargs): super(ChannelShuffle1d, self).__init__(**kwargs) assert (channels % groups == 0) self.groups = groups def __call__(self, x): batch, channels, seq_len = x.shape channels_per_group = channels // self.groups x = F.reshape(x, shape=(batch, self.groups, channels_per_group, seq_len)) x = F.swapaxes(x, axis1=1, axis2=2) x = F.reshape(x, shape=(batch, channels, seq_len)) return x def __repr__(self): s = "{name}(groups={groups})" return s.format( name=self.__class__.__name__, groups=self.groups) class DwsConvBlock1d(Chain): """ Depthwise version of the 1D standard convolution block with batch normalization, activation, dropout, and channel shuffle. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size. stride : int Stride of the convolution. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.Activation('relu') Activation function or name of activation function. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu), dropout_rate=0.0, **kwargs): super(DwsConvBlock1d, self).__init__(**kwargs) self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) self.use_channel_shuffle = (groups > 1) with self.init_scope(): self.dw_conv = MaskConv1d( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=in_channels, use_bias=use_bias) self.pw_conv = mask_conv1d1( in_channels=in_channels, out_channels=out_channels, groups=groups, use_bias=use_bias) if self.use_channel_shuffle: self.shuffle = ChannelShuffle1d( channels=out_channels, groups=groups) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = activation() if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x, x_len): x, x_len = self.dw_conv(x, x_len) x, x_len = self.pw_conv(x, x_len) if self.use_channel_shuffle: x = self.shuffle(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x) return x, x_len class JasperUnit(Chain): """ Jasper unit with residual connection. Parameters: ---------- in_channels : int or list of int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size. bn_eps : float Small float added to variance in Batch norm. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. 
""" def __init__(self, in_channels, out_channels, ksize, bn_eps, dropout_rate, repeat, use_dw, use_dr, **kwargs): super(JasperUnit, self).__init__(**kwargs) self.use_dropout = (dropout_rate != 0.0) self.use_dr = use_dr block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d with self.init_scope(): if self.use_dr: self.identity_block = DualPathParallelConcurent() with self.identity_block.init_scope(): for i, dense_in_channels_i in enumerate(in_channels): setattr(self.identity_block, "block{}".format(i + 1), mask_conv1d1_block( in_channels=dense_in_channels_i, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None)) in_channels = in_channels[-1] else: self.identity_block = mask_conv1d1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None) self.body = DualPathSequential() with self.body.init_scope(): for i in range(repeat): activation = (lambda: F.relu) if i < repeat - 1 else None dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0 setattr(self.body, "block{}".format(i + 1), block_class( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=1, pad=(ksize // 2), bn_eps=bn_eps, dropout_rate=dropout_rate_i, activation=activation)) in_channels = out_channels self.activ = F.relu if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x, x_len): if self.use_dr: x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None) y = [x] if y is None else y + [x] y_len = [x_len] if y_len is None else y_len + [x_len] identity, _ = self.identity_block(y, y_len) identity = F.stack(tuple(identity), axis=1) identity = F.sum(identity, axis=1) else: identity, _ = self.identity_block(x, x_len) x, x_len = self.body(x, x_len) x = x + identity x = self.activ(x) if self.use_dropout: x = self.dropout(x) if self.use_dr: return x, (x_len, y, y_len) else: return x, x_len class JasperFinalBlock(Chain): """ Jasper specific final block. 
Parameters: ---------- in_channels : int Number of input channels. channels : list of int Number of output channels for each block. ksizes : list of int Kernel sizes for each block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each block. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. """ def __init__(self, in_channels, channels, ksizes, bn_eps, dropout_rates, use_dw, use_dr, **kwargs): super(JasperFinalBlock, self).__init__(**kwargs) self.use_dr = use_dr conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d with self.init_scope(): self.conv1 = conv1_class( in_channels=in_channels, out_channels=channels[-2], ksize=ksizes[-2], stride=1, pad=(2 * ksizes[-2] // 2 - 1), dilate=2, bn_eps=bn_eps, dropout_rate=dropout_rates[-2]) self.conv2 = MaskConvBlock1d( in_channels=channels[-2], out_channels=channels[-1], ksize=ksizes[-1], stride=1, pad=(ksizes[-1] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[-1]) def __call__(self, x, x_len): if self.use_dr: x_len = x_len[0] x, x_len = self.conv1(x, x_len) x, x_len = self.conv2(x, x_len) return x, x_len class Jasper(Chain): """ Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- channels : list of int Number of output channels for each unit and initial/final block. ksizes : list of int Kernel sizes for each unit and initial/final block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each unit and initial/final block. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. from_audio : bool, default True Whether to treat input as audio instead of Mel-specs. dither : float, default 0.0 Amount of white-noise dithering. 
return_text : bool, default False Whether to return text instead of logits. vocabulary : list of str or None, default None Vocabulary of the dataset. in_channels : int, default 64 Number of input channels (audio features). classes : int, default 29 Number of classification classes (number of graphemes). """ def __init__(self, channels, ksizes, bn_eps, dropout_rates, repeat, use_dw, use_dr, from_audio=True, dither=0.0, return_text=False, vocabulary=None, in_channels=64, classes=29, **kwargs): super(Jasper, self).__init__(**kwargs) self.in_size = in_channels self.classes = classes self.vocabulary = vocabulary self.from_audio = from_audio self.return_text = return_text with self.init_scope(): if self.from_audio: self.preprocessor = NemoMelSpecExtractor(dither=dither) self.features = DualPathSequential() with self.features.init_scope(): init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d setattr(self.features, "init_block", init_block_class( in_channels=in_channels, out_channels=channels[0], ksize=ksizes[0], stride=2, pad=(ksizes[0] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[0])) in_channels = channels[0] in_channels_list = [] for i, (out_channels, ksize, dropout_rate) in\ enumerate(zip(channels[1:-2], ksizes[1:-2], dropout_rates[1:-2])): in_channels_list += [in_channels] setattr(self.features, "unit{}".format(i + 1), JasperUnit( in_channels=(in_channels_list if use_dr else in_channels), out_channels=out_channels, ksize=ksize, bn_eps=bn_eps, dropout_rate=dropout_rate, repeat=repeat, use_dw=use_dw, use_dr=use_dr)) in_channels = out_channels setattr(self.features, "final_block", JasperFinalBlock( in_channels=in_channels, channels=channels, ksizes=ksizes, bn_eps=bn_eps, dropout_rates=dropout_rates, use_dw=use_dw, use_dr=use_dr)) in_channels = channels[-1] self.output = conv1d1( in_channels=in_channels, out_channels=classes, use_bias=True) if self.return_text: self.ctc_decoder = CtcDecoder(vocabulary=vocabulary) def __call__(self, x, x_len=None): if 
x_len is None: assert (type(x) in (list, tuple)) x, x_len = x if self.from_audio: x, x_len = self.preprocessor(x if type(x) is np.ndarray else x.array) x, x_len = self.features(x, x_len) x = self.output(x) if self.return_text: greedy_predictions = x.swapaxes(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).asnumpy() return self.ctc_decoder(greedy_predictions) else: return x, x_len def get_jasper(version, use_dw=False, use_dr=False, bn_eps=1e-3, vocabulary=None, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create Jasper/DR/QuartzNet model with specific parameters. Parameters: ---------- version : tuple of str Model type and configuration. use_dw : bool, default False Whether to use depthwise block. use_dr : bool, default False Whether to use dense residual scheme. bn_eps : float, default 1e-3 Small float added to variance in Batch norm. vocabulary : list of str or None, default None Vocabulary of the dataset. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" import numpy as np blocks, repeat = tuple(map(int, version[1].split("x"))) main_stage_repeat = blocks // 5 model_type = version[0] if model_type == "jasper": channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024] ksizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1] dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4] elif model_type == "quartznet": channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024] ksizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1] dropout_rates_per_stage = [0.0] * 8 else: raise ValueError("Unsupported Jasper family model type: {}".format(model_type)) stage_repeat = np.full((8,), 1) stage_repeat[1:-2] *= main_stage_repeat channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], []) ksizes = sum([[a] * r for (a, r) in zip(ksizes_per_stage, stage_repeat)], []) dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], []) net = Jasper( channels=channels, ksizes=ksizes, bn_eps=bn_eps, dropout_rates=dropout_rates, repeat=repeat, use_dw=use_dw, use_dr=use_dr, vocabulary=vocabulary, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def jasper5x3(**kwargs): """ Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "5x3"), model_name="jasper5x3", **kwargs) def jasper10x4(**kwargs): """ Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs) def jasper10x5(**kwargs): """ Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "10x5"), model_name="jasper10x5", **kwargs) def _test(): import chainer chainer.global_config.train = False pretrained = False from_audio = True audio_features = 64 classes = 29 models = [ jasper5x3, jasper10x4, jasper10x5, ] for model in models: net = model( in_channels=audio_features, classes=classes, from_audio=from_audio, pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) # assert (model != jasper5x3 or weight_count == 107681053) # assert (model != jasper10x4 or weight_count == 261393693) # assert (model != jasper10x5 or weight_count == 322286877) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max) x = np.random.rand(*x_shape).astype(np.float32) x_len = seq_len.astype(np.long) y, y_len = net(x, x_len) assert (y.shape[:2] == (batch, net.classes)) if from_audio: assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
34,656
30.912523
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resneta.py
""" ResNet(A) with average downsampling for ImageNet-1K, implemented in Chainer. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, SimpleSequential from .resnet import ResBlock, ResBottleneck from .senet import SEInitBlock class ResADownBlock(Chain): """ ResNet(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. dilate : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. """ def __init__(self, in_channels, out_channels, stride, dilate=1, **kwargs): super(ResADownBlock, self).__init__(**kwargs) with self.init_scope(): # self.pool = partial( # F.average_pooling_2d, # ksize=(stride if dilate == 1 else 1), # stride=(stride if dilate == 1 else 1)) self.pool = partial( F.average_pooling_nd, ksize=(stride if dilate == 1 else 1), stride=(stride if dilate == 1 else 1), pad_value=None) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.pool(x) x = self.conv(x) return x class ResAUnit(Chain): """ ResNet(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. 
dilate : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, pad=1, dilate=1, bottleneck=True, conv1_stride=False, **kwargs): super(ResAUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=pad, dilate=dilate, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_block = ResADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, dilate=dilate) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNetA(Chain): """ ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. dilated : bool, default False Whether to use dilation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, dilated=False, in_channels=3, in_size=(224, 224), classes=1000, **kwargs): super(ResNetA, self).__init__(**kwargs) self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if dilated: stride = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1 dilate = (2 ** max(0, i - 1 - int(j == 0))) else: stride = 2 if (j == 0) and (i != 0) else 1 dilate = 1 setattr(stage, "unit{}".format(j + 1), ResAUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=dilate, dilate=dilate, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=(in_size[0] // 32, in_size[1] // 32))) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resneta(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNet(A) with average downsampling model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. 
width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNetA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading 
pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resneta10(**kwargs): """ ResNet(A)-10 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resneta(blocks=10, model_name="resneta10", **kwargs) def resnetabc14b(**kwargs): """ ResNet(A)-BC-14b with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs) def resneta18(**kwargs): """ ResNet(A)-18 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resneta(blocks=18, model_name="resneta18", **kwargs) def resneta50b(**kwargs): """ ResNet(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs) def resneta101b(**kwargs): """ ResNet(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs) def resneta152b(**kwargs): """ ResNet(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ resneta10, resnetabc14b, resneta18, resneta50b, resneta101b, resneta152b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resneta10 or weight_count == 5438024) assert (model != resnetabc14b or weight_count == 10084168) assert (model != resneta18 or weight_count == 11708744) assert (model != resneta50b or weight_count == 25576264) assert (model != resneta101b or weight_count == 44568392) assert (model != resneta152b or weight_count == 60212040) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
15,093
33.382688
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnesta.py
""" ResNeSt(A) with average downsampling for ImageNet-1K, implemented Chainer. Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. """ __all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnesta50', 'resnesta101', 'resnesta152', 'resnesta200', 'resnesta269', 'ResNeStADownBlock'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, saconv3x3_block, SimpleSequential from .senet import SEInitBlock class ResNeStABlock(Chain): """ Simple ResNeSt(A) block for residual path in ResNeSt(A) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, use_bias=False, use_bn=True, **kwargs): super(ResNeStABlock, self).__init__(**kwargs) self.resize = (stride > 1) with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn) if self.resize: self.pool = partial( F.average_pooling_nd, ksize=3, stride=stride, pad=1) self.conv2 = saconv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, activation=None) def __call__(self, x): x = self.conv1(x) if self.resize: x = self.pool(x) x = self.conv2(x) return x class ResNeStABottleneck(Chain): """ ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck_factor=4, **kwargs): super(ResNeStABottleneck, self).__init__(**kwargs) self.resize = (stride > 1) mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = saconv3x3_block( in_channels=mid_channels, out_channels=mid_channels) if self.resize: self.pool = partial( F.average_pooling_nd, ksize=3, stride=stride, pad=1) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) if self.resize: x = self.pool(x) x = self.conv3(x) return x class ResNeStADownBlock(Chain): """ ResNeSt(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride, **kwargs): super(ResNeStADownBlock, self).__init__(**kwargs) with self.init_scope(): self.pool = partial( F.average_pooling_nd, ksize=stride, stride=stride, pad_value=None) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.pool(x) x = self.conv(x) return x class ResNeStAUnit(Chain): """ ResNeSt(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck=True, **kwargs): super(ResNeStAUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ResNeStABottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.body = ResNeStABlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_block = ResNeStADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNeStA(Chain): """ ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(224, 224), classes=1000, **kwargs): super(ResNeStA, self).__init__(**kwargs) self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResNeStAUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=(in_size[0] // 32, in_size[1] // 32))) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) if dropout_rate > 0.0: setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnesta(blocks, bottleneck=None, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNeSt(A) with average downsampling model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if blocks >= 101: init_block_channels *= 2 if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNeStA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net 
def resnestabc14(**kwargs): """ ResNeSt(A)-BC-14 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=14, bottleneck=True, model_name="resnestabc14", **kwargs) def resnesta18(**kwargs): """ ResNeSt(A)-18 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=18, model_name="resnesta18", **kwargs) def resnestabc26(**kwargs): """ ResNeSt(A)-BC-26 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=26, bottleneck=True, model_name="resnestabc26", **kwargs) def resnesta50(**kwargs): """ ResNeSt(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=50, model_name="resnesta50", **kwargs) def resnesta101(**kwargs): """ ResNeSt(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=101, model_name="resnesta101", **kwargs) def resnesta152(**kwargs): """ ResNeSt(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=152, model_name="resnesta152", **kwargs) def resnesta200(in_size=(256, 256), **kwargs): """ ResNeSt(A)-200 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnesta(blocks=200, in_size=in_size, dropout_rate=0.2, model_name="resnesta200", **kwargs) def resnesta269(in_size=(320, 320), **kwargs): """ ResNeSt(A)-269 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (320, 320) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnesta(blocks=269, in_size=in_size, dropout_rate=0.2, model_name="resnesta269", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (resnestabc14, 224), (resnesta18, 224), (resnestabc26, 224), (resnesta50, 224), (resnesta101, 224), (resnesta152, 224), (resnesta200, 256), (resnesta269, 320), ] for model, size in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnestabc14 or weight_count == 10611688) assert (model != resnesta18 or weight_count == 12763784) assert (model != resnestabc26 or weight_count == 17069448) assert (model != resnesta50 or weight_count == 27483240) assert (model != resnesta101 or weight_count == 48275016) assert (model != resnesta152 or weight_count == 65316040) assert (model != resnesta200 or weight_count == 70201544) assert (model != resnesta269 or weight_count == 110929480) batch = 14 x = np.zeros((batch, 3, size, size), np.float32) y = net(x) assert (y.shape == (batch, 1000)) if __name__ == "__main__": _test()
18,370
31.922939
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/senet.py
""" SENet for ImageNet-1K, implemented in Chainer. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SEBlock, SimpleSequential class SENetBottleneck(Chain): """ SENet bottleneck block for residual path in SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(SENetBottleneck, self).__init__() mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D group_width2 = group_width // 2 with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width2) self.conv2 = conv3x3_block( in_channels=group_width2, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class SENetUnit(Chain): """ SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. identity_conv3x3 : bool, default False Whether to use 3x3 convolution in the identity link. 
""" def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, identity_conv3x3): super(SENetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = SENetBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) self.se = SEBlock(channels=out_channels) if self.resize_identity: if identity_conv3x3: self.identity_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) else: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEInitBlock(Chain): """ SENet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(SEInitBlock, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool(x) return x class SENet(Chain): """ SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. 
bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): super(SENet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() identity_conv3x3 = (i != 0) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SENetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, identity_conv3x3=identity_conv3x3)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=0.2)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_senet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SENet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 16: layers = [1, 1, 1, 1] cardinality = 32 elif blocks == 28: layers = [2, 2, 2, 2] cardinality = 32 elif blocks == 40: layers = [3, 3, 3, 3] cardinality = 32 elif blocks == 52: layers = [3, 4, 6, 3] cardinality = 32 elif blocks == 103: layers = [3, 4, 23, 3] cardinality = 32 elif blocks == 154: layers = [3, 8, 36, 3] cardinality = 64 else: raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks)) bottleneck_width = 4 init_block_channels = 128 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SENet( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def senet16(**kwargs): """ SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_senet(blocks=16, model_name="senet16", **kwargs) def senet28(**kwargs): """ SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_senet(blocks=28, model_name="senet28", **kwargs) def senet40(**kwargs): """ SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_senet(blocks=40, model_name="senet40", **kwargs) def senet52(**kwargs): """ SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_senet(blocks=52, model_name="senet52", **kwargs) def senet103(**kwargs): """ SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_senet(blocks=103, model_name="senet103", **kwargs) def senet154(**kwargs): """ SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_senet(blocks=154, model_name="senet154", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ senet16, senet28, senet40, senet52, senet103, senet154, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != senet16 or weight_count == 31366168) assert (model != senet28 or weight_count == 36453768) assert (model != senet40 or weight_count == 41541368) assert (model != senet52 or weight_count == 44659416) assert (model != senet103 or weight_count == 60963096) assert (model != senet154 or weight_count == 115088984) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,603
29.918182
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/diapreresnet_cifar.py
""" DIA-PreResNet for CIFAR/SVHN, implemented in Chainer. Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['CIFARDIAPreResNet', 'diapreresnet20_cifar10', 'diapreresnet20_cifar100', 'diapreresnet20_svhn', 'diapreresnet56_cifar10', 'diapreresnet56_cifar100', 'diapreresnet56_svhn', 'diapreresnet110_cifar10', 'diapreresnet110_cifar100', 'diapreresnet110_svhn', 'diapreresnet164bn_cifar10', 'diapreresnet164bn_cifar100', 'diapreresnet164bn_svhn', 'diapreresnet1001_cifar10', 'diapreresnet1001_cifar100', 'diapreresnet1001_svhn', 'diapreresnet1202_cifar10', 'diapreresnet1202_cifar100', 'diapreresnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, DualPathSequential, SimpleSequential from .preresnet import PreResActivation from .diaresnet import DIAAttention from .diapreresnet import DIAPreResUnit class CIFARDIAPreResNet(Chain): """ DIA-PreResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARDIAPreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DIAPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False, attention=attention, hold_attention=(j == 0))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_diapreresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DIA-PreResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARDIAPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def diapreresnet20_cifar10(classes=10, **kwargs): """ DIA-PreResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar10", **kwargs) def diapreresnet20_cifar100(classes=100, **kwargs): """ DIA-PreResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar100", **kwargs) def diapreresnet20_svhn(classes=10, **kwargs): """ DIA-PreResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_svhn", **kwargs) def diapreresnet56_cifar10(classes=10, **kwargs): """ DIA-PreResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar10", **kwargs) def diapreresnet56_cifar100(classes=100, **kwargs): """ DIA-PreResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar100", **kwargs) def diapreresnet56_svhn(classes=10, **kwargs): """ DIA-PreResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_svhn", **kwargs) def diapreresnet110_cifar10(classes=10, **kwargs): """ DIA-PreResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar10", **kwargs) def diapreresnet110_cifar100(classes=100, **kwargs): """ DIA-PreResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar100", **kwargs) def diapreresnet110_svhn(classes=10, **kwargs): """ DIA-PreResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_svhn", **kwargs) def diapreresnet164bn_cifar10(classes=10, **kwargs): """ DIA-PreResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar10", **kwargs) def diapreresnet164bn_cifar100(classes=100, **kwargs): """ DIA-PreResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar100", **kwargs) def diapreresnet164bn_svhn(classes=10, **kwargs): """ DIA-PreResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_svhn", **kwargs) def diapreresnet1001_cifar10(classes=10, **kwargs): """ DIA-PreResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar10", **kwargs) def diapreresnet1001_cifar100(classes=100, **kwargs): """ DIA-PreResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar100", **kwargs) def diapreresnet1001_svhn(classes=10, **kwargs): """ DIA-PreResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_svhn", **kwargs) def diapreresnet1202_cifar10(classes=10, **kwargs): """ DIA-PreResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_cifar10", **kwargs) def diapreresnet1202_cifar100(classes=100, **kwargs): """ DIA-PreResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_cifar100", **kwargs) def diapreresnet1202_svhn(classes=10, **kwargs): """ DIA-PreResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (diapreresnet20_cifar10, 10), (diapreresnet20_cifar100, 100), (diapreresnet20_svhn, 10), (diapreresnet56_cifar10, 10), (diapreresnet56_cifar100, 100), (diapreresnet56_svhn, 10), (diapreresnet110_cifar10, 10), (diapreresnet110_cifar100, 100), (diapreresnet110_svhn, 10), (diapreresnet164bn_cifar10, 10), (diapreresnet164bn_cifar100, 100), (diapreresnet164bn_svhn, 10), (diapreresnet1001_cifar10, 10), (diapreresnet1001_cifar100, 100), (diapreresnet1001_svhn, 10), (diapreresnet1202_cifar10, 10), (diapreresnet1202_cifar100, 100), (diapreresnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != diapreresnet20_cifar10 or weight_count == 286674) assert (model != diapreresnet20_cifar100 or weight_count == 292524) assert (model != diapreresnet20_svhn or weight_count == 286674) assert (model != diapreresnet56_cifar10 or weight_count == 869970) assert (model != diapreresnet56_cifar100 or weight_count == 875820) assert (model != diapreresnet56_svhn or weight_count == 869970) assert (model != diapreresnet110_cifar10 or weight_count == 1744914) assert (model != diapreresnet110_cifar100 or weight_count == 1750764) assert (model != diapreresnet110_svhn or weight_count == 1744914) assert (model != diapreresnet164bn_cifar10 or weight_count == 1922106) assert (model != diapreresnet164bn_cifar100 or weight_count == 1945236) assert (model != diapreresnet164bn_svhn or weight_count == 1922106) assert (model != diapreresnet1001_cifar10 or weight_count == 10546554) assert (model != diapreresnet1001_cifar100 or weight_count == 10569684) assert (model != diapreresnet1001_svhn or weight_count == 10546554) assert (model != 
diapreresnet1202_cifar10 or weight_count == 19438226) assert (model != diapreresnet1202_cifar100 or weight_count == 19444076) assert (model != diapreresnet1202_svhn or weight_count == 19438226) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
20,602
36.665448
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/simplepose_coco.py
""" SimplePose for COCO Keypoint, implemented in Chainer. Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. """ __all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco', 'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco', 'simplepose_resneta152b_coco'] import os from chainer import Chain from chainer.serializers import load_npz from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock, SimpleSequential from .resnet import resnet18, resnet50b, resnet101b, resnet152b from .resneta import resneta50b, resneta101b, resneta152b class SimplePose(Chain): """ SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. 
""" def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17, **kwargs): super(SimplePose, self).__init__(**kwargs) assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap with self.init_scope(): self.backbone = backbone in_channels = backbone_out_channels self.decoder = SimpleSequential() with self.decoder.init_scope(): for i, out_channels in enumerate(channels): setattr(self.decoder, "unit{}".format(i + 1), DeconvBlock( in_channels=in_channels, out_channels=out_channels, ksize=4, stride=2, pad=1)) in_channels = out_channels setattr(self.decoder, "final_block", conv1x1( in_channels=in_channels, out_channels=keypoints, use_bias=True)) self.heatmap_max_det = HeatmapMaxDetBlock() def __call__(self, x): x = self.backbone(x) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_simplepose(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SimplePose model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" channels = [256, 256, 256] net = SimplePose( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=512, keypoints=keypoints, model_name="simplepose_resnet18_coco", **kwargs) def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet50b_coco", **kwargs) def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet101b_coco", **kwargs) def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnet152b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet152b_coco", **kwargs) def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resneta50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta50b_coco", **kwargs) def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resneta101b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta101b_coco", **kwargs) def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resneta152b(pretrained=pretrained_backbone).features del backbone.final_pool return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta152b_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ simplepose_resnet18_coco, simplepose_resnet50b_coco, simplepose_resnet101b_coco, simplepose_resnet152b_coco, simplepose_resneta50b_coco, simplepose_resneta101b_coco, simplepose_resneta152b_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != simplepose_resnet18_coco or weight_count == 15376721) assert (model != simplepose_resnet50b_coco or weight_count == 33999697) assert (model != simplepose_resnet101b_coco or weight_count == 52991825) assert (model != simplepose_resnet152b_coco or weight_count == 68635473) assert (model != simplepose_resneta50b_coco or weight_count == 34018929) assert (model 
!= simplepose_resneta101b_coco or weight_count == 53011057) assert (model != simplepose_resneta152b_coco or weight_count == 68654705) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
12,665
37.150602
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/vovnet.py
""" VoVNet for ImageNet-1K, implemented in Chainer. Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. """ __all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SequentialConcurrent, SimpleSequential class VoVUnit(Chain): """ VoVNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. branch_channels : int Number of output channels for each branch. num_branches : int Number of branches. resize : bool Whether to use resize block. use_residual : bool Whether to use residual block. """ def __init__(self, in_channels, out_channels, branch_channels, num_branches, resize, use_residual): super(VoVUnit, self).__init__() self.resize = resize self.use_residual = use_residual with self.init_scope(): if self.resize: self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, cover_all=True) self.branches = SequentialConcurrent() with self.branches.init_scope(): branch_in_channels = in_channels for i in range(num_branches): setattr(self.branches, "branch{}".format(i + 1), conv3x3_block( in_channels=branch_in_channels, out_channels=branch_channels)) branch_in_channels = branch_channels self.concat_conv = conv1x1_block( in_channels=(in_channels + num_branches * branch_channels), out_channels=out_channels) def __call__(self, x): if self.resize: x = self.pool(x) if self.use_residual: identity = x x = self.branches(x) x = self.concat_conv(x) if self.use_residual: x = x + identity return x class VoVInitBlock(Chain): """ VoVNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(VoVInitBlock, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class VoVNet(Chain): """ VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- channels : list of list of int Number of output channels for each unit. branch_channels : list of list of int Number of branch output channels for each unit. num_branches : int Number of branches for the each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, branch_channels, num_branches, in_channels=3, in_size=(224, 224), classes=1000): super(VoVNet, self).__init__() self.in_size = in_size self.classes = classes init_block_channels = 128 with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", VoVInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): use_residual = (j != 0) resize = (j == 0) and (i != 0) setattr(stage, "unit{}".format(j + 1), VoVUnit( in_channels=in_channels, out_channels=out_channels, branch_channels=branch_channels[i][j], num_branches=num_branches, resize=resize, use_residual=use_residual)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_vovnet(blocks, slim=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. slim : bool, default False Whether to use a slim model. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 27: layers = [1, 1, 1, 1] elif blocks == 39: layers = [1, 1, 2, 2] elif blocks == 57: layers = [1, 1, 4, 3] else: raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 6 + 3 == blocks) num_branches = 5 channels_per_layers = [256, 512, 768, 1024] branch_channels_per_layers = [128, 160, 192, 224] if slim: channels_per_layers = [ci // 2 for ci in channels_per_layers] branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)] net = VoVNet( channels=channels, branch_channels=branch_channels, num_branches=num_branches, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def vovnet27s(**kwargs): """ VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vovnet(blocks=27, slim=True, model_name="vovnet27s", **kwargs) def vovnet39(**kwargs): """ VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_vovnet(blocks=39, model_name="vovnet39", **kwargs) def vovnet57(**kwargs): """ VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_vovnet(blocks=57, model_name="vovnet57", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ vovnet27s, vovnet39, vovnet57, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != vovnet27s or weight_count == 3525736) assert (model != vovnet39 or weight_count == 22600296) assert (model != vovnet57 or weight_count == 36640296) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
10,504
30.930091
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/espnetv2.py
""" ESPNetv2 for ImageNet-1K, implemented in Chainer. Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. """ __all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2'] import os import math import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, conv1x1_block, conv3x3_block, DualPathSequential, SimpleSequential class PreActivation(Chain): """ PreResNet like pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PreActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) self.activ = L.PReLU(shape=(in_channels,)) def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class ShortcutBlock(Chain): """ ESPNetv2 shortcut block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShortcutBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, activation=(lambda: L.PReLU(shape=(in_channels,)))) self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class HierarchicalConcurrent(SimpleSequential): """ A container for hierarchical concatenation of modules on the base of the sequential container. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. 
""" def __init__(self, axis=1): super(HierarchicalConcurrent, self).__init__() self.axis = axis def __call__(self, x): out = [] y_prev = None for name in self.layer_names: y = self[name](x,) if y_prev is not None: y += y_prev out.append(y) y_prev = y out = F.concat(tuple(out), axis=self.axis) return out class ESPBlock(Chain): """ ESPNetv2 block (so-called EESP block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the branch convolution layers. dilates : list of int Dilation values for branches. """ def __init__(self, in_channels, out_channels, stride, dilates): super(ESPBlock, self).__init__() num_branches = len(dilates) assert (out_channels % num_branches == 0) self.downsample = (stride != 1) mid_channels = out_channels // num_branches with self.init_scope(): self.reduce_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, groups=num_branches, activation=(lambda: L.PReLU(shape=(mid_channels,)))) self.branches = HierarchicalConcurrent() with self.branches.init_scope(): for i in range(num_branches): setattr(self.branches, "branch{}".format(i + 1), conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, pad=dilates[i], dilate=dilates[i], groups=mid_channels)) self.merge_conv = conv1x1_block( in_channels=out_channels, out_channels=out_channels, groups=num_branches, activation=None) self.preactiv = PreActivation(in_channels=out_channels) if not self.downsample: self.activ = L.PReLU(shape=(out_channels,)) def __call__(self, x, x0): y = self.reduce_conv(x) y = self.branches(y) y = self.preactiv(y) y = self.merge_conv(y) if not self.downsample: y = y + x y = self.activ(y) return y, x0 class DownsampleBlock(Chain): """ ESPNetv2 downsample block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. x0_channels : int Number of input channels for shortcut. 
dilates : list of int Dilation values for branches in EESP block. """ def __init__(self, in_channels, out_channels, x0_channels, dilates): super(DownsampleBlock, self).__init__() inc_channels = out_channels - in_channels with self.init_scope(): self.pool = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) self.eesp = ESPBlock( in_channels=in_channels, out_channels=inc_channels, stride=2, dilates=dilates) self.shortcut_block = ShortcutBlock( in_channels=x0_channels, out_channels=out_channels) self.activ = L.PReLU(shape=(out_channels,)) def __call__(self, x, x0): y1 = self.pool(x) y2, _ = self.eesp(x, None) x = F.concat((y1, y2), axis=1) x0 = self.pool(x0) y3 = self.shortcut_block(x0) x = x + y3 x = self.activ(x) return x, x0 class ESPInitBlock(Chain): """ ESPNetv2 initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ESPInitBlock, self).__init__() with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, activation=(lambda: L.PReLU(shape=(out_channels,)))) self.pool = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) def __call__(self, x, x0): x = self.conv(x) x0 = self.pool(x0) return x, x0 class ESPFinalBlock(Chain): """ ESPNetv2 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. final_groups : int Number of groups in the last convolution layer. 
""" def __init__(self, in_channels, out_channels, final_groups): super(ESPFinalBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, groups=in_channels, activation=(lambda: L.PReLU(shape=(in_channels,)))) self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, groups=final_groups, activation=(lambda: L.PReLU(shape=(out_channels,)))) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class ESPNetv2(Chain): """ ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. final_block_groups : int Number of groups for the final unit. dilates : list of list of list of int Dilation values for branches in each unit. dropout_rate : float, default 0.2 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, final_block_groups, dilates, dropout_rate=0.2, in_channels=3, in_size=(224, 224), classes=1000): super(ESPNetv2, self).__init__() self.in_size = in_size self.classes = classes x0_channels = in_channels with self.init_scope(): self.features = DualPathSequential( return_two=False, first_ordinals=0, last_ordinals=2) with self.features.init_scope(): setattr(self.features, "init_block", ESPInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if j == 0: unit = DownsampleBlock( in_channels=in_channels, out_channels=out_channels, x0_channels=x0_channels, dilates=dilates[i][j]) else: unit = ESPBlock( in_channels=in_channels, out_channels=out_channels, stride=1, dilates=dilates[i][j]) setattr(stage, "unit{}".format(j + 1), unit) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", ESPFinalBlock( in_channels=in_channels, out_channels=final_block_channels, final_groups=final_block_groups)) in_channels = final_block_channels setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=0.2)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x, x) x = self.output(x) return x def get_espnetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ESPNetv2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (width_scale <= 2.0) branches = 4 layers = [1, 4, 8, 4] max_dilation_list = [6, 5, 4, 3, 2] max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1) for (i, li) in enumerate(layers)] dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations] base_channels = 32 weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))] init_block_channels = base_channels if weighed_base_channels > base_channels else weighed_base_channels final_block_channels = 1024 if width_scale <= 1.5 else 1280 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ESPNetv2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, final_block_groups=branches, dilates=dilations, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def espnetv2_wd2(**kwargs): """ ESPNetv2 x0.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_espnetv2(width_scale=0.5, model_name="espnetv2_wd2", **kwargs) def espnetv2_w1(**kwargs): """ ESPNetv2 x1.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.0, model_name="espnetv2_w1", **kwargs) def espnetv2_w5d4(**kwargs): """ ESPNetv2 x1.25 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.25, model_name="espnetv2_w5d4", **kwargs) def espnetv2_w3d2(**kwargs): """ ESPNetv2 x1.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs) def espnetv2_w2(**kwargs): """ ESPNetv2 x2.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ espnetv2_wd2, espnetv2_w1, espnetv2_w5d4, espnetv2_w3d2, espnetv2_w2, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != espnetv2_wd2 or weight_count == 1241092) assert (model != espnetv2_w1 or weight_count == 1669592) assert (model != espnetv2_w5d4 or weight_count == 1964832) assert (model != espnetv2_w3d2 or weight_count == 2314120) assert (model != espnetv2_w2 or weight_count == 3497144) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
17,736
31.366788
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/shufflenet.py
""" ShuffleNet for ImageNet-1K, implemented in Chainer. Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. """ __all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1', 'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2', 'shufflenet_g1_wd4', 'shufflenet_g3_wd4'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3, depthwise_conv3x3, SimpleSequential, ChannelShuffle class ShuffleUnit(Chain): """ ShuffleNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. 
""" def __init__(self, in_channels, out_channels, groups, downsample, ignore_group): super(ShuffleUnit, self).__init__() self.downsample = downsample mid_channels = out_channels // 4 if downsample: out_channels -= in_channels with self.init_scope(): self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups)) self.compress_bn1 = L.BatchNormalization(size=mid_channels) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, stride=(2 if self.downsample else 1)) self.dw_bn2 = L.BatchNormalization(size=mid_channels) self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups) self.expand_bn3 = L.BatchNormalization(size=out_channels) if downsample: self.avgpool = partial( F.average_pooling_2d, ksize=3, stride=2, pad=1) self.activ = F.relu def __call__(self, x): identity = x x = self.compress_conv1(x) x = self.compress_bn1(x) x = self.activ(x) x = self.c_shuffle(x) x = self.dw_conv2(x) x = self.dw_bn2(x) x = self.expand_conv3(x) x = self.expand_bn3(x) if self.downsample: identity = self.avgpool(identity) x = F.concat((x, identity), axis=1) else: x = x + identity x = self.activ(x) return x class ShuffleInitBlock(Chain): """ ShuffleNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(ShuffleInitBlock, self).__init__() with self.init_scope(): self.conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=2) self.bn = L.BatchNormalization(size=out_channels) self.activ = F.relu self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class ShuffleNet(Chain): """ ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), classes=1000): super(ShuffleNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) ignore_group = (i == 0) and (j == 0) setattr(stage, "unit{}".format(j + 1), ShuffleUnit( in_channels=in_channels, out_channels=out_channels, groups=groups, downsample=downsample, ignore_group=ignore_group)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_shufflenet(groups, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ShuffleNet model with specific parameters. Parameters: ---------- groups : int Number of groups in convolution layers. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" init_block_channels = 24 layers = [4, 8, 4] if groups == 1: channels_per_layers = [144, 288, 576] elif groups == 2: channels_per_layers = [200, 400, 800] elif groups == 3: channels_per_layers = [240, 480, 960] elif groups == 4: channels_per_layers = [272, 544, 1088] elif groups == 8: channels_per_layers = [384, 768, 1536] else: raise ValueError("The {} of groups is not supported".format(groups)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) net = ShuffleNet( channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def shufflenet_g1_w1(**kwargs): """ ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs) def shufflenet_g2_w1(**kwargs): """ ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs) def shufflenet_g3_w1(**kwargs): """ ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs) def shufflenet_g4_w1(**kwargs): """ ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs) def shufflenet_g8_w1(**kwargs): """ ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs) def shufflenet_g1_w3d4(**kwargs): """ ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs) def shufflenet_g3_w3d4(**kwargs): """ ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs) def shufflenet_g1_wd2(**kwargs): """ ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs) def shufflenet_g3_wd2(**kwargs): """ ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs) def shufflenet_g1_wd4(**kwargs): """ ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs) def shufflenet_g3_wd4(**kwargs): """ ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ shufflenet_g1_w1, shufflenet_g2_w1, shufflenet_g3_w1, shufflenet_g4_w1, shufflenet_g8_w1, shufflenet_g1_w3d4, shufflenet_g3_w3d4, shufflenet_g1_wd2, shufflenet_g3_wd2, shufflenet_g1_wd4, shufflenet_g3_wd4, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenet_g1_w1 or weight_count == 1531936) assert (model != shufflenet_g2_w1 or weight_count == 1733848) assert (model != shufflenet_g3_w1 or weight_count == 1865728) assert (model != shufflenet_g4_w1 or weight_count == 1968344) assert (model != shufflenet_g8_w1 or weight_count == 2434768) assert (model != shufflenet_g1_w3d4 or weight_count == 975214) assert (model != shufflenet_g3_w3d4 or weight_count == 1238266) assert (model != shufflenet_g1_wd2 or weight_count == 534484) assert (model != shufflenet_g3_wd2 or weight_count == 718324) assert (model != shufflenet_g1_wd4 or weight_count == 209746) assert (model != shufflenet_g3_wd4 or weight_count == 305902) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
16,240
32.906054
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/bamresnet.py
""" BAM-ResNet for ImageNet-1K, implemented in Chainer. Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. """ __all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import SimpleSequential, conv1x1, conv1x1_block, conv3x3_block from .resnet import ResInitBlock, ResUnit class DenseBlock(Chain): """ Standard dense block with Batch normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input features. out_channels : int Number of output features. """ def __init__(self, in_channels, out_channels): super(DenseBlock, self).__init__() with self.init_scope(): self.fc = L.Linear( in_size=in_channels, out_size=out_channels) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) self.activ = F.relu def __call__(self, x): x = self.fc(x) x = self.bn(x) x = self.activ(x) return x class ChannelGate(Chain): """ BAM channel gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_layers : int, default 1 Number of dense blocks. 
""" def __init__(self, channels, reduction_ratio=16, num_layers=1): super(ChannelGate, self).__init__() mid_channels = channels // reduction_ratio with self.init_scope(): self.init_fc = DenseBlock( in_channels=channels, out_channels=mid_channels) self.main_fcs = SimpleSequential() with self.main_fcs.init_scope(): for i in range(num_layers - 1): setattr(self.main_fcs, "fc{}".format(i + 1), DenseBlock( in_channels=mid_channels, out_channels=mid_channels)) self.final_fc = L.Linear( in_size=mid_channels, out_size=channels) def __call__(self, x): input_shape = x.shape x = F.average_pooling_2d(x, ksize=x.shape[2:]) x = F.reshape(x, shape=(x.shape[0], -1)) x = self.init_fc(x) x = self.main_fcs(x) x = self.final_fc(x) x = F.broadcast_to(F.expand_dims(F.expand_dims(x, axis=2), axis=3), input_shape) return x class SpatialGate(Chain): """ BAM spatial gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_dil_convs : int, default 2 Number of dilated convolutions. dilate : int, default 4 Dilation/padding value for corresponding convolutions. 
""" def __init__(self, channels, reduction_ratio=16, num_dil_convs=2, dilate=4): super(SpatialGate, self).__init__() mid_channels = channels // reduction_ratio with self.init_scope(): self.init_conv = conv1x1_block( in_channels=channels, out_channels=mid_channels, stride=1, use_bias=True) self.dil_convs = SimpleSequential() with self.dil_convs.init_scope(): for i in range(num_dil_convs): setattr(self.dil_convs, "conv{}".format(i + 1), conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=1, pad=dilate, dilate=dilate, use_bias=True)) self.final_conv = conv1x1( in_channels=mid_channels, out_channels=1, stride=1, use_bias=True) def __call__(self, x): input_shape = x.shape x = self.init_conv(x) x = self.dil_convs(x) x = self.final_conv(x) x = F.broadcast_to(x, input_shape) return x class BamBlock(Chain): """ BAM attention block for BAM-ResNet. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels): super(BamBlock, self).__init__() with self.init_scope(): self.ch_att = ChannelGate(channels=channels) self.sp_att = SpatialGate(channels=channels) def __call__(self, x): att = 1 + F.sigmoid(self.ch_att(x) * self.sp_att(x)) x = x * att return x class BamResUnit(Chain): """ BAM-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck): super(BamResUnit, self).__init__() self.use_bam = (stride != 1) with self.init_scope(): if self.use_bam: self.bam = BamBlock(channels=in_channels) self.res_unit = ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False) def __call__(self, x): if self.use_bam: x = self.bam(x) x = self.res_unit(x) return x class BamResNet(Chain): """ BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), classes=1000): super(BamResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), BamResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create BAM-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. use_se : bool Whether to use SE block. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = BamResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def bam_resnet18(**kwargs): """ BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="bam_resnet18", **kwargs) def bam_resnet34(**kwargs): """ BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="bam_resnet34", **kwargs) def bam_resnet50(**kwargs): """ BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="bam_resnet50", **kwargs) def bam_resnet101(**kwargs): """ BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="bam_resnet101", **kwargs) def bam_resnet152(**kwargs): """ BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, model_name="bam_resnet152", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ bam_resnet18, bam_resnet34, bam_resnet50, bam_resnet101, bam_resnet152, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != bam_resnet18 or weight_count == 11712503) assert (model != bam_resnet34 or weight_count == 21820663) assert (model != bam_resnet50 or weight_count == 25915099) assert (model != bam_resnet101 or weight_count == 44907227) assert (model != bam_resnet152 or weight_count == 60550875) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,941
29.845133
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resattnet.py
""" ResAttNet for ImageNet-1K, implemented in Chainer. Original paper: 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. """ __all__ = ['ResAttNet', 'resattnet56', 'resattnet92', 'resattnet128', 'resattnet164', 'resattnet200', 'resattnet236', 'resattnet452'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv7x7_block, pre_conv1x1_block, pre_conv3x3_block, Hourglass, SimpleSequential class PreResBottleneck(Chain): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PreResBottleneck, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x, x_pre_activ class ResBlock(Chain): """ Residual block with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. 
""" def __init__(self, in_channels, out_channels, stride=1): super(ResBlock, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def __call__(self, x): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class InterpolationBlock(Chain): """ Interpolation block. Parameters: ---------- size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. """ def __init__(self, size): super(InterpolationBlock, self).__init__() self.size = size def __call__(self, x): return F.resize_images(x, output_shape=self.size) class DoubleSkipBlock(Chain): """ Double skip connection block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DoubleSkipBlock, self).__init__() with self.init_scope(): self.skip1 = ResBlock( in_channels=in_channels, out_channels=out_channels) def __call__(self, x): x = x + self.skip1(x) return x class ResBlockSequence(Chain): """ Sequence of residual blocks with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of sequence. 
""" def __init__(self, in_channels, out_channels, length): super(ResBlockSequence, self).__init__() with self.init_scope(): self.blocks = SimpleSequential() with self.blocks.init_scope(): for i in range(length): setattr(self.blocks, "block{}".format(i + 1), ResBlock( in_channels=in_channels, out_channels=out_channels)) def __call__(self, x): x = self.blocks(x) return x class DownAttBlock(Chain): """ Down sub-block for hourglass of attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of residual blocks list. """ def __init__(self, in_channels, out_channels, length): super(DownAttBlock, self).__init__() with self.init_scope(): self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) self.res_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=length) def __call__(self, x): x = self.pool(x) x = self.res_blocks(x) return x class UpAttBlock(Chain): """ Up sub-block for hourglass of attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of residual blocks list. size : tuple of 2 int Spatial size of the output tensor for the bilinear upsampling operation. """ def __init__(self, in_channels, out_channels, length, size): super(UpAttBlock, self).__init__() with self.init_scope(): self.res_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=length) self.upsample = InterpolationBlock(size) def __call__(self, x): x = self.res_blocks(x) x = self.upsample(x) return x class MiddleAttBlock(Chain): """ Middle sub-block for attention block. Parameters: ---------- channels : int Number of input/output channels. 
""" def __init__(self, channels): super(MiddleAttBlock, self).__init__() with self.init_scope(): self.conv1 = pre_conv1x1_block( in_channels=channels, out_channels=channels) self.conv2 = pre_conv1x1_block( in_channels=channels, out_channels=channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = F.sigmoid(x) return x class AttBlock(Chain): """ Attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. hourglass_depth : int Depth of hourglass block. att_scales : list of int Attention block specific scales. in_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. """ def __init__(self, in_channels, out_channels, hourglass_depth, att_scales, in_size): super(AttBlock, self).__init__() assert (len(att_scales) == 3) scale_factor = 2 scale_p, scale_t, scale_r = att_scales with self.init_scope(): self.init_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=scale_p) down_seq = SimpleSequential() up_seq = SimpleSequential() skip_seq = SimpleSequential() for i in range(hourglass_depth): with down_seq.init_scope(): setattr(down_seq, "down{}".format(i + 1), DownAttBlock( in_channels=in_channels, out_channels=out_channels, length=scale_r)) with up_seq.init_scope(): setattr(up_seq, "up{}".format(i + 1), UpAttBlock( in_channels=in_channels, out_channels=out_channels, length=scale_r, size=in_size)) in_size = tuple([x // scale_factor for x in in_size]) with skip_seq.init_scope(): if i == 0: setattr(skip_seq, "skip1", ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=scale_t)) else: setattr(skip_seq, "skip{}".format(i + 1), DoubleSkipBlock( in_channels=in_channels, out_channels=out_channels)) self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, return_first_skip=True) self.middle_block = MiddleAttBlock(channels=out_channels) self.final_block = ResBlock( 
in_channels=in_channels, out_channels=out_channels) def __call__(self, x): x = self.init_blocks(x) x, y = self.hg(x) x = self.middle_block(x) x = (1 + x) * y x = self.final_block(x) return x class ResAttInitBlock(Chain): """ ResAttNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ResAttInitBlock, self).__init__() with self.init_scope(): self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class PreActivation(Chain): """ Pre-activation block without convolution layer. It's used by itself as the final block in PreResNet. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PreActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class ResAttNet(Chain): """ ResAttNet model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. attentions : list of list of int Whether to use a attention unit or residual one. att_scales : list of int Attention block specific scales. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, attentions, att_scales, in_channels=3, in_size=(224, 224), classes=1000): super(ResAttNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResAttInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels in_size = tuple([x // 4 for x in in_size]) for i, channels_per_stage in enumerate(channels): hourglass_depth = len(channels) - 1 - i stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 if attentions[i][j]: setattr(stage, "unit{}".format(j + 1), AttBlock( in_channels=in_channels, out_channels=out_channels, hourglass_depth=hourglass_depth, att_scales=att_scales, in_size=in_size)) else: setattr(stage, "unit{}".format(j + 1), ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels in_size = tuple([x // stride for x in in_size]) setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, 'post_activ', PreActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resattnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResAttNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 56: att_layers = [1, 1, 1] att_scales = [1, 2, 1] elif blocks == 92: att_layers = [1, 2, 3] att_scales = [1, 2, 1] elif blocks == 128: att_layers = [2, 3, 4] att_scales = [1, 2, 1] elif blocks == 164: att_layers = [3, 4, 5] att_scales = [1, 2, 1] elif blocks == 200: att_layers = [4, 5, 6] att_scales = [1, 2, 1] elif blocks == 236: att_layers = [5, 6, 7] att_scales = [1, 2, 1] elif blocks == 452: att_layers = [5, 6, 7] att_scales = [2, 4, 3] else: raise ValueError("Unsupported ResAttNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] layers = att_layers + [2] channels = [[ci] * (li + 1) for (ci, li) in zip(channels_per_layers, layers)] attentions = [[0] + [1] * li for li in att_layers] + [[0] * 3] net = ResAttNet( channels=channels, init_block_channels=init_block_channels, attentions=attentions, att_scales=att_scales, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resattnet56(**kwargs): """ ResAttNet-56 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=56, model_name="resattnet56", **kwargs) def resattnet92(**kwargs): """ ResAttNet-92 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=92, model_name="resattnet92", **kwargs) def resattnet128(**kwargs): """ ResAttNet-128 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=128, model_name="resattnet128", **kwargs) def resattnet164(**kwargs): """ ResAttNet-164 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=164, model_name="resattnet164", **kwargs) def resattnet200(**kwargs): """ ResAttNet-200 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=200, model_name="resattnet200", **kwargs) def resattnet236(**kwargs): """ ResAttNet-236 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resattnet(blocks=236, model_name="resattnet236", **kwargs) def resattnet452(**kwargs): """ ResAttNet-452 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resattnet(blocks=452, model_name="resattnet452", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ resattnet56, resattnet92, resattnet128, resattnet164, resattnet200, resattnet236, resattnet452, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resattnet56 or weight_count == 31810728) assert (model != resattnet92 or weight_count == 52466344) assert (model != resattnet128 or weight_count == 65294504) assert (model != resattnet164 or weight_count == 78122664) assert (model != resattnet200 or weight_count == 90950824) assert (model != resattnet236 or weight_count == 103778984) assert (model != resattnet452 or weight_count == 182285224) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
21,611
30.096403
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/centernet.py
""" CenterNet for ImageNet-1K, implemented in Chainer. Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850. """ __all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc', 'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco', 'CenterNetHeatmapMaxDet'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3_block, DeconvBlock, Concurrent, SimpleSequential from .resnet import resnet18, resnet50b, resnet101b class CenterNetDecoderUnit(Chain): """ CenterNet decoder unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels, **kwargs): super(CenterNetDecoderUnit, self).__init__(**kwargs) with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bias=True) self.deconv = DeconvBlock( in_channels=out_channels, out_channels=out_channels, ksize=4, stride=2, pad=1) def __call__(self, x): x = self.conv(x) x = self.deconv(x) return x class CenterNetHeadBlock(Chain): """ CenterNet simple head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels, **kwargs): super(CenterNetHeadBlock, self).__init__(**kwargs) with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, use_bias=True, use_bn=False) self.conv2 = conv1x1( in_channels=in_channels, out_channels=out_channels, use_bias=True) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class CenterNetHeatmapBlock(Chain): """ CenterNet heatmap block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
do_nms : bool Whether do NMS (or simply clip for training otherwise). """ def __init__(self, in_channels, out_channels, do_nms, **kwargs): super(CenterNetHeatmapBlock, self).__init__(**kwargs) self.do_nms = do_nms with self.init_scope(): self.head = CenterNetHeadBlock( in_channels=in_channels, out_channels=out_channels) self.sigmoid = F.sigmoid if self.do_nms: self.pool = partial( F.max_pooling_2d, ksize=3, stride=1, pad=1, cover_all=False) def __call__(self, x): x = self.head(x) x = self.sigmoid(x) if self.do_nms: y = self.pool(x) x = x * (y.array == x.array) else: eps = 1e-4 x = F.clip(x, x_min=eps, x_max=(1.0 - eps)) return x class CenterNetHeatmapMaxDet(Chain): """ CenterNet decoder for heads (heatmap, wh, reg). Parameters: ---------- topk : int, default 40 Keep only `topk` detections. scale : int, default is 4 Downsampling scale factor. max_batch : int, default is 256 Maximal batch size. """ def __init__(self, topk=40, scale=4, max_batch=256, **kwargs): super(CenterNetHeatmapMaxDet, self).__init__(**kwargs) self.topk = topk self.scale = scale self.max_batch = max_batch def __call__(self, x): import numpy as np heatmap = x[:, :-4].array wh = x[:, -4:-2].array reg = x[:, -2:].array batch, _, out_h, out_w = heatmap.shape heatmap_flat = heatmap.reshape((batch, -1)) indices = np.argsort(heatmap_flat)[:, -self.topk:] scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1) topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32) topk_indices = indices % (out_h * out_w) topk_ys = (topk_indices // out_w).astype(dtype=np.float32) topk_xs = (topk_indices % out_w).astype(dtype=np.float32) center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2)) wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2)) xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1) ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1) topk_xs = topk_xs + xs topk_ys = topk_ys + ys w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, 
axis=-1) h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1) half_w = 0.5 * w half_h = 0.5 * h bboxes = F.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1) bboxes = bboxes * self.scale topk_classes = F.expand_dims(topk_classes, axis=-1) scores = F.expand_dims(scores, axis=-1) result = F.concat((bboxes, topk_classes, scores), axis=-1) return result class CenterNet(Chain): """ CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. topk : int, default 40 Keep only `topk` detections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (512, 512) Spatial size of the expected input image. classes : int, default 80 Number of classification classes. 
""" def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, topk=40, in_channels=3, in_size=(512, 512), classes=80, **kwargs): super(CenterNet, self).__init__(**kwargs) self.in_size = in_size self.in_channels = in_channels self.return_heatmap = return_heatmap with self.init_scope(): self.backbone = backbone self.decoder = SimpleSequential() with self.decoder.init_scope(): in_channels = backbone_out_channels for i, out_channels in enumerate(channels): setattr(self.decoder, "unit{}".format(i + 1), CenterNetDecoderUnit( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels heads = Concurrent() with heads.init_scope(): setattr(heads, "heapmap_block", CenterNetHeatmapBlock( in_channels=in_channels, out_channels=classes, do_nms=(not self.return_heatmap))) setattr(heads, "wh_block", CenterNetHeadBlock( in_channels=in_channels, out_channels=2)) setattr(heads, "reg_block", CenterNetHeadBlock( in_channels=in_channels, out_channels=2)) setattr(self.decoder, "heads", heads) if not self.return_heatmap: self.heatmap_max_det = CenterNetHeatmapMaxDet( topk=topk, scale=4) def __call__(self, x): x = self.backbone(x) x = self.decoder(x) if not self.return_heatmap: x = self.heatmap_max_det(x) return x def get_centernet(backbone, backbone_out_channels, classes, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create CenterNet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. classes : int Number of classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. Returns: ------- HybridBlock A network. 
""" channels = [256, 128, 64] net = CenterNet( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def centernet_resnet18_voc(pretrained_backbone=False, classes=20, **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes, model_name="centernet_resnet18_voc", **kwargs) def centernet_resnet18_coco(pretrained_backbone=False, classes=80, **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnet18(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes, model_name='centernet_resnet18_coco', **kwargs) def centernet_resnet50b_voc(pretrained_backbone=False, classes=20, **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet50b_voc", **kwargs) def centernet_resnet50b_coco(pretrained_backbone=False, classes=80, **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet50b_coco", **kwargs) def centernet_resnet101b_voc(pretrained_backbone=False, classes=20, **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet101b_voc", **kwargs) def centernet_resnet101b_coco(pretrained_backbone=False, classes=80, **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnet101b(pretrained=pretrained_backbone).features del backbone.final_pool return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet101b_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (512, 512) topk = 40 return_heatmap = False pretrained = False models = [ (centernet_resnet18_voc, 20), (centernet_resnet18_coco, 80), (centernet_resnet50b_voc, 20), (centernet_resnet50b_coco, 80), (centernet_resnet101b_voc, 20), (centernet_resnet101b_coco, 80), ] for model, classes in models: net = model(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != centernet_resnet18_voc or weight_count == 14215640) assert (model != centernet_resnet18_coco or weight_count == 14219540) assert (model != centernet_resnet50b_voc or weight_count == 30086104) assert (model != centernet_resnet50b_coco or weight_count == 30090004) assert (model != centernet_resnet101b_voc or weight_count == 49078232) assert (model != centernet_resnet101b_coco or weight_count == 49082132) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert (y.shape[0] == batch) if return_heatmap: assert (y.shape[1] == classes + 4) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4) else: assert (y.shape[1] == topk) and (y.shape[2] == 6) if __name__ == "__main__": _test()
16,923
33.259109
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/xdensenet_cifar.py
""" X-DenseNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. """ __all__ = ['CIFARXDenseNet', 'xdensenet40_2_k24_bc_cifar10', 'xdensenet40_2_k24_bc_cifar100', 'xdensenet40_2_k24_bc_svhn', 'xdensenet40_2_k36_bc_cifar10', 'xdensenet40_2_k36_bc_cifar100', 'xdensenet40_2_k36_bc_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, SimpleSequential from .preresnet import PreResActivation from .densenet import TransitionBlock from .xdensenet import pre_xconv3x3_block, XDenseUnit class XDenseSimpleUnit(Chain): """ X-DenseNet simple unit for CIFAR. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. expand_ratio : int Ratio of expansion. """ def __init__(self, in_channels, out_channels, dropout_rate, expand_ratio): super(XDenseSimpleUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) inc_channels = out_channels - in_channels with self.init_scope(): self.conv = pre_xconv3x3_block( in_channels=in_channels, out_channels=inc_channels, expand_ratio=expand_ratio) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): identity = x x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = F.concat((identity, x), axis=1) return x class CIFARXDenseNet(Chain): """ X-DenseNet model for CIFAR from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. 
bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. expand_ratio : int, default 2 Ratio of expansion. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, expand_ratio=2, in_channels=3, in_size=(32, 32), classes=10): super(CIFARXDenseNet, self).__init__() self.in_size = in_size self.classes = classes unit_class = XDenseUnit if bottleneck else XDenseSimpleUnit with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), unit_class( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, expand_ratio=expand_ratio)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_xdensenet_cifar(classes, blocks, 
growth_rate, bottleneck, expand_ratio=2, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create X-DenseNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. growth_rate : int Growth rate. bottleneck : bool Whether to use a bottleneck or simple block in units. expand_ratio : int, default 2 Ratio of expansion. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 else: assert ((blocks - 4) % 3 == 0) layers = [(blocks - 4) // 3] * 3 init_block_channels = 2 * growth_rate from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = CIFARXDenseNet( channels=channels, init_block_channels=init_block_channels, classes=classes, bottleneck=bottleneck, expand_ratio=expand_ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def xdensenet40_2_k24_bc_cifar10(classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_cifar10", **kwargs) def xdensenet40_2_k24_bc_cifar100(classes=100, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_cifar100", **kwargs) def xdensenet40_2_k24_bc_svhn(classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_svhn", **kwargs) def xdensenet40_2_k36_bc_cifar10(classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_cifar10", **kwargs) def xdensenet40_2_k36_bc_cifar100(classes=100, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_cifar100", **kwargs) def xdensenet40_2_k36_bc_svhn(classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (xdensenet40_2_k24_bc_cifar10, 10), (xdensenet40_2_k24_bc_cifar100, 100), (xdensenet40_2_k24_bc_svhn, 10), (xdensenet40_2_k36_bc_cifar10, 10), (xdensenet40_2_k36_bc_cifar100, 100), (xdensenet40_2_k36_bc_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != xdensenet40_2_k24_bc_cifar10 or weight_count == 690346) assert (model != xdensenet40_2_k24_bc_cifar100 or weight_count == 714196) assert (model != xdensenet40_2_k24_bc_svhn or weight_count == 690346) assert (model != xdensenet40_2_k36_bc_cifar10 or weight_count == 1542682) assert (model != xdensenet40_2_k36_bc_cifar100 or weight_count == 1578412) assert (model != xdensenet40_2_k36_bc_svhn or weight_count == 1542682) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
13,033
34.906336
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/ntsnet_cub.py
""" NTS-Net for CUB-200-2011, implemented in Chainer. Original paper: 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. """ __all__ = ['NTSNet', 'ntsnet_cub'] import os import numpy as np import chainer.functions as F import chainer.links as L from chainer import Chain from chainer.serializers import load_npz from functools import partial from .common import conv1x1, conv3x3, Flatten, AdaptiveAvgPool2D, SimpleSequential from .resnet import resnet50b def hard_nms(cdds, top_n=10, iou_thresh=0.25): """ Hard Non-Maximum Suppression. Parameters: ---------- cdds : np.array Borders. top_n : int, default 10 Number of top-K informative regions. iou_thresh : float, default 0.25 IoU threshold. Returns: ------- np.array Filtered borders. """ assert (type(cdds) == np.ndarray) assert (len(cdds.shape) == 2) assert (cdds.shape[1] >= 5) cdds = cdds.copy() indices = np.argsort(cdds[:, 0]) cdds = cdds[indices] cdd_results = [] res = cdds while res.any(): cdd = res[-1] cdd_results.append(cdd) if len(cdd_results) == top_n: return np.array(cdd_results) res = res[:-1] start_max = np.maximum(res[:, 1:3], cdd[1:3]) end_min = np.minimum(res[:, 3:5], cdd[3:5]) lengths = end_min - start_max intersec_map = lengths[:, 0] * lengths[:, 1] intersec_map[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0 iou_map_cur = intersec_map / ((res[:, 3] - res[:, 1]) * (res[:, 4] - res[:, 2]) + (cdd[3] - cdd[1]) * ( cdd[4] - cdd[2]) - intersec_map) res = res[iou_map_cur < iou_thresh] return np.array(cdd_results) class NavigatorBranch(Chain): """ Navigator branch block for Navigator unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. 
""" def __init__(self, in_channels, out_channels, stride): super(NavigatorBranch, self).__init__() mid_channels = 128 with self.init_scope(): self.down_conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=stride, use_bias=True) self.activ = F.relu self.tidy_conv = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) self.flatten = Flatten() def __call__(self, x): y = self.down_conv(x) y = self.activ(y) z = self.tidy_conv(y) z = self.flatten(z) return z, y class NavigatorUnit(Chain): """ Navigator init. """ def __init__(self): super(NavigatorUnit, self).__init__() with self.init_scope(): self.branch1 = NavigatorBranch( in_channels=2048, out_channels=6, stride=1) self.branch2 = NavigatorBranch( in_channels=128, out_channels=6, stride=2) self.branch3 = NavigatorBranch( in_channels=128, out_channels=9, stride=2) def __call__(self, x): t1, x = self.branch1(x) t2, x = self.branch2(x) t3, _ = self.branch3(x) return F.concat((t1, t2, t3), axis=1) class NTSNet(Chain): """ NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. Parameters: ---------- backbone : nn.Sequential Feature extractor. aux : bool, default False Whether to output auxiliary results. top_n : int, default 4 Number of extra top-K informative regions. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, backbone, aux=False, top_n=4, in_channels=3, in_size=(448, 448), classes=200): super(NTSNet, self).__init__() assert (in_channels > 0) self.in_size = in_size self.classes = classes pad_side = 224 self.pad_width = ((0, 0), (0, 0), (pad_side, pad_side), (pad_side, pad_side)) self.top_n = top_n self.aux = aux self.num_cat = 4 _, edge_anchors, _ = self._generate_default_anchor_maps() self.edge_anchors = (edge_anchors + 224).astype(np.int) self.edge_anchors = np.concatenate( (self.edge_anchors.copy(), np.arange(0, len(self.edge_anchors)).reshape(-1, 1)), axis=1) with self.init_scope(): self.backbone = backbone self.backbone_tail = SimpleSequential() with self.backbone_tail.init_scope(): setattr(self.backbone_tail, "final_pool", AdaptiveAvgPool2D()) setattr(self.backbone_tail, "flatten", Flatten()) setattr(self.backbone_tail, "dropout", partial( F.dropout, ratio=0.5)) self.backbone_classifier = L.Linear( in_size=(512 * 4), out_size=classes) self.navigator_unit = NavigatorUnit() self.concat_net = L.Linear( in_size=(2048 * (self.num_cat + 1)), out_size=classes) if self.aux: self.partcls_net = L.Linear( in_size=(512 * 4), out_size=classes) def __call__(self, x): raw_pre_features = self.backbone(x) rpn_score = self.navigator_unit(raw_pre_features) rpn_score.to_cpu() all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1) for y in rpn_score.array] top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds] top_n_cdds = np.array(top_n_cdds) top_n_index = top_n_cdds[:, :, -1].astype(np.int64) top_n_index = np.array(top_n_index, dtype=np.int64) top_n_prob = np.take_along_axis(rpn_score.array, top_n_index, axis=1) batch = x.shape[0] x_pad = F.pad(x, pad_width=self.pad_width, mode="constant", constant_values=0) part_imgs = [] for i in range(batch): for j in range(self.top_n): y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64)) x_res = F.resize_images( x_pad[i:i + 1, :, y0:y1, x0:x1], output_shape=(224, 
224)) part_imgs.append(x_res) part_imgs = F.concat(tuple(part_imgs), axis=0) part_features = self.backbone_tail(self.backbone(part_imgs)) part_feature = part_features.reshape((batch, self.top_n, -1)) part_feature = part_feature[:, :self.num_cat, :] part_feature = part_feature.reshape((batch, -1)) raw_features = self.backbone_tail(raw_pre_features) concat_out = F.concat((part_feature, raw_features), axis=1) concat_logits = self.concat_net(concat_out) if self.aux: raw_logits = self.backbone_classifier(raw_features) part_logits = self.partcls_net(part_features).reshape((batch, self.top_n, -1)) return concat_logits, raw_logits, part_logits, top_n_prob else: return concat_logits @staticmethod def _generate_default_anchor_maps(input_shape=(448, 448)): """ Generate default anchor maps. Parameters: ---------- input_shape : tuple of 2 int Input image size. Returns: ------- center_anchors : np.array anchors * 4 (oy, ox, h, w). edge_anchors : np.array anchors * 4 (y0, x0, y1, x1). anchor_area : np.array anchors * 1 (area). 
""" anchor_scale = [2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)] anchor_aspect_ratio = [0.667, 1, 1.5] anchors_setting = ( dict(layer="p3", stride=32, size=48, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio), dict(layer="p4", stride=64, size=96, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio), dict(layer="p5", stride=128, size=192, scale=[1, anchor_scale[0], anchor_scale[1]], aspect_ratio=anchor_aspect_ratio), ) center_anchors = np.zeros((0, 4), dtype=np.float32) edge_anchors = np.zeros((0, 4), dtype=np.float32) anchor_areas = np.zeros((0,), dtype=np.float32) input_shape = np.array(input_shape, dtype=int) for anchor_info in anchors_setting: stride = anchor_info["stride"] size = anchor_info["size"] scales = anchor_info["scale"] aspect_ratios = anchor_info["aspect_ratio"] output_map_shape = np.ceil(input_shape.astype(np.float32) / stride) output_map_shape = output_map_shape.astype(np.int) output_shape = tuple(output_map_shape) + (4, ) ostart = stride / 2.0 oy = np.arange(ostart, ostart + stride * output_shape[0], stride) oy = oy.reshape(output_shape[0], 1) ox = np.arange(ostart, ostart + stride * output_shape[1], stride) ox = ox.reshape(1, output_shape[1]) center_anchor_map_template = np.zeros(output_shape, dtype=np.float32) center_anchor_map_template[:, :, 0] = oy center_anchor_map_template[:, :, 1] = ox for anchor_scale in scales: for anchor_aspect_ratio in aspect_ratios: center_anchor_map = center_anchor_map_template.copy() center_anchor_map[:, :, 2] = size * anchor_scale / float(anchor_aspect_ratio) ** 0.5 center_anchor_map[:, :, 3] = size * anchor_scale * float(anchor_aspect_ratio) ** 0.5 edge_anchor_map = np.concatenate( (center_anchor_map[:, :, :2] - center_anchor_map[:, :, 2:4] / 2.0, center_anchor_map[:, :, :2] + center_anchor_map[:, :, 2:4] / 2.0), axis=-1) anchor_area_map = center_anchor_map[:, :, 2] * center_anchor_map[:, :, 3] center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4))) edge_anchors = 
np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4))) anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1))) return center_anchors, edge_anchors, anchor_areas def get_ntsnet(backbone, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create NTS-Net model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. aux : bool, default False Whether to output auxiliary results. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ net = NTSNet( backbone=backbone, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def ntsnet_cub(pretrained_backbone=False, aux=True, **kwargs): """ NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.final_pool return get_ntsnet(backbone=backbone, aux=aux, model_name="ntsnet_cub", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False aux = True pretrained = False models = [ ntsnet_cub, ] for model in models: net = model(pretrained=pretrained, aux=aux) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != ntsnet_cub or weight_count == 29033133) else: assert (model != ntsnet_cub or weight_count == 28623333) x = np.zeros((5, 3, 448, 448), np.float32) ys = net(x) y = ys[0] if aux else ys assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == 200)) if __name__ == "__main__": _test()
13,831
33.153086
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/proxylessnas_cub.py
""" ProxylessNAS for CUB-200-2011, implemented in Chainer. Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. """ __all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub'] from .proxylessnas import get_proxylessnas def proxylessnas_cpu_cub(classes=200, **kwargs): """ ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="cpu", model_name="proxylessnas_cpu_cub", **kwargs) def proxylessnas_gpu_cub(classes=200, **kwargs): """ ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="gpu", model_name="proxylessnas_gpu_cub", **kwargs) def proxylessnas_mobile_cub(classes=200, **kwargs): """ ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_proxylessnas(classes=classes, version="mobile", model_name="proxylessnas_mobile_cub", **kwargs) def proxylessnas_mobile14_cub(classes=200, **kwargs): """ ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="mobile14", model_name="proxylessnas_mobile14_cub", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ proxylessnas_cpu_cub, proxylessnas_gpu_cub, proxylessnas_mobile_cub, proxylessnas_mobile14_cub, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != proxylessnas_cpu_cub or weight_count == 3215248) assert (model != proxylessnas_gpu_cub or weight_count == 5736648) assert (model != proxylessnas_mobile_cub or weight_count == 3055712) assert (model != proxylessnas_mobile14_cub or weight_count == 5423168) x = np.zeros((14, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (14, 200)) if __name__ == "__main__": _test()
3,839
33.285714
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/common.py
""" Common routines for models in Chainer. """ __all__ = ['round_channels', 'BreakBlock', 'ReLU6', 'HSwish', 'get_activation_layer', 'GlobalAvgPool2D', 'SelectableDense', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block', 'DeconvBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock', 'saconv3x3_block', 'PixelShuffle', 'DucBlock', 'SimpleSequential', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'Flatten', 'AdaptiveAvgPool2D', 'NormActivation', 'InterpolationBlock', 'HeatmapMaxDetBlock'] from inspect import isfunction from functools import partial import numpy as np from chainer import Chain import chainer.functions as F import chainer.links as L from chainer import link from chainer.initializers import _get_initializer from chainer.variable import Parameter def round_channels(channels, divisor=8): """ Round weighted channel number (make divisible operation). Parameters: ---------- channels : int or float Original number of channels. divisor : int, default 8 Alignment value. Returns: ------- int Weighted number of channels. """ rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor) if float(rounded_channels) < 0.9 * channels: rounded_channels += divisor return rounded_channels class BreakBlock(Chain): """ Break coonnection block for hourglass. """ def __init__(self): super(BreakBlock, self).__init__() def __call__(self, x): return None def __repr__(self): return '{name}()'.format(name=self.__class__.__name__) class ReLU6(Chain): """ ReLU6 activation layer. 
""" def __call__(self, x): return F.clip(x, 0.0, 6.0) class Swish(Chain): """ Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941. """ def __call__(self, x): return x * F.sigmoid(x) class HSigmoid(Chain): """ Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ def __call__(self, x): return F.clip(x + 3.0, 0.0, 6.0) / 6.0 class HSwish(Chain): """ H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ def __call__(self, x): return x * F.clip(x + 3.0, 0.0, 6.0) / 6.0 def get_activation_layer(activation): """ Create activation layer from string/function. Parameters: ---------- activation : function or str Activation function or name of activation function. Returns: ------- function Activation layer. """ assert (activation is not None) if isfunction(activation): return activation() elif isinstance(activation, str): if activation == "relu": return F.relu elif activation == "relu6": return ReLU6() elif activation == "swish": return Swish() # return partial( # F.swish, # beta=[1.0]) elif activation == "hswish": return HSwish() elif activation == "sigmoid": return F.sigmoid elif activation == "hsigmoid": return HSigmoid() else: raise NotImplementedError() else: return activation class GlobalAvgPool2D(Chain): """ Global average pooling operation for spatial data. """ def __call__(self, x): return F.average_pooling_2d(x, ksize=x.shape[2:]) class SelectableDense(link.Link): """ Selectable dense layer. Parameters: ---------- in_channels : int Number of input features. out_channels : int Number of output features. use_bias : bool, default False Whether the layer uses a bias vector. initial_weight : `types.InitializerSpec`, default None Initializer for the `kernel` weights matrix. initial_bias: `types.InitializerSpec`, default 0 Initializer for the bias vector. 
num_options : int, default 1 Number of selectable options. """ def __init__(self, in_channels, out_channels, use_bias=False, initial_weight=None, initial_bias=0, num_options=1): super(SelectableDense, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.use_bias = use_bias self.num_options = num_options with self.init_scope(): weight_initializer = _get_initializer(initial_weight) self.weight = Parameter( initializer=weight_initializer, shape=(num_options, out_channels, in_channels), name="weight") if use_bias: bias_initializer = _get_initializer(initial_bias) self.bias = Parameter( initializer=bias_initializer, shape=(num_options, out_channels), name="bias") else: self.bias = None def forward(self, x, indices): weight = self.xp.take(self.weight.data, indices=indices, axis=0) x = F.expand_dims(x, axis=-1) x = F.batch_matmul(weight, x) x = F.squeeze(x, axis=-1) if self.use_bias: bias = self.xp.take(self.bias.data, indices=indices, axis=0) x += bias return x @property def printable_specs(self): specs = [ ('in_channels', self.in_channels), ('out_channels', self.out_channels), ('use_bias', self.use_bias), ('num_options', self.num_options), ] for spec in specs: yield spec class DenseBlock(Chain): """ Standard dense block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): super(DenseBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn with self.init_scope(): self.fc = L.Linear( in_size=in_channels, out_size=out_channels, nobias=(not use_bias)) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def __call__(self, x): x = self.fc(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x class ConvBlock1d(Chain): """ Standard 1D convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int Convolution window size. stride : int Stride of the convolution. pad : int Padding value for convolution layer. dilate : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): super(ConvBlock1d, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn with self.init_scope(): self.conv = L.Convolution1D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def __call__(self, x): x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1(in_channels, out_channels, stride=1, groups=1, use_bias=False): """ Convolution 1x1 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. """ return L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, nobias=(not use_bias), groups=groups) def conv3x3(in_channels, out_channels, stride=1, pad=1, dilate=1, groups=1, use_bias=False): """ Convolution 3x3 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. 
""" return L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups) def depthwise_conv3x3(channels, stride=1, pad=1, dilate=1, use_bias=False): """ Depthwise convolution 3x3 layer. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. """ return L.Convolution2D( in_channels=channels, out_channels=channels, ksize=3, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=channels) class ConvBlock(Chain): """ Standard convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): super(ConvBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups) if self.use_bn: self.bn = L.BatchNormalization( size=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def __call__(self, x): x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1_block(in_channels, out_channels, stride=1, pad=0, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 1x1 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 0 Padding value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=pad, groups=groups, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def conv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 3x3 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def conv5x5_block(in_channels, out_channels, stride=1, pad=2, dilate=1, groups=1, use_bias=False, bn_eps=1e-5, activation=(lambda: F.relu)): """ 5x5 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 2 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. 
""" return ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, bn_eps=bn_eps, activation=activation) def conv7x7_block(in_channels, out_channels, stride=1, pad=3, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ 7x7 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 3 Padding value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=stride, pad=pad, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def dwconv_block(in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu)): """ Depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
activation : function or str or None, default F.relu Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=out_channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def dwconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, bn_eps=1e-5, activation=(lambda: F.relu)): """ 3x3 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, bn_eps=bn_eps, activation=activation) def dwconv5x5_block(in_channels, out_channels, stride=1, pad=2, dilate=1, use_bias=False, bn_eps=1e-5, activation=(lambda: F.relu)): """ 5x5 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 2 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
activation : function or str or None, default F.relu Activation function or name of activation function. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, bn_eps=bn_eps, activation=activation) class DwsConvBlock(Chain): """ Depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default F.relu Activation function after the depthwise convolution block. pw_activation : function or str or None, default F.relu Activation function after the pointwise convolution block. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: F.relu), pw_activation=(lambda: F.relu)): super(DwsConvBlock, self).__init__() with self.init_scope(): self.dw_conv = dwconv_block( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=dw_use_bn, bn_eps=bn_eps, activation=dw_activation) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=pw_use_bn, bn_eps=bn_eps, activation=pw_activation) def __call__(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x def dwsconv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, bn_eps=1e-5, dw_activation=(lambda: F.relu), pw_activation=(lambda: F.relu), **kwargs): """ 3x3 depthwise separable version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default F.relu Activation function after the depthwise convolution block. pw_activation : function or str or None, default F.relu Activation function after the pointwise convolution block. 
""" return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, bn_eps=bn_eps, dw_activation=dw_activation, pw_activation=pw_activation, **kwargs) class PreConvBlock(Chain): """ Convolution block with Batch normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, use_bias=False, use_bn=True, return_preact=False, activate=True): super(PreConvBlock, self).__init__() self.return_preact = return_preact self.activate = activate self.use_bn = use_bn with self.init_scope(): if self.use_bn: self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) if self.activate: self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate) def __call__(self, x): if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_conv1x1_block(in_channels, out_channels, stride=1, use_bias=False, use_bn=True, return_preact=False, activate=True): """ 1x1 version of the pre-activated convolution block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. """ return PreConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, use_bias=use_bias, use_bn=use_bn, return_preact=return_preact, activate=activate) def pre_conv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, use_bias=False, use_bn=True, return_preact=False, activate=True): """ 3x3 version of the pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. """ return PreConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, use_bias=use_bias, use_bn=use_bn, return_preact=return_preact, activate=activate) class DeconvBlock(Chain): """ Deconvolution block with batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the deconvolution. 
    pad : int or tuple/list of 2 int
        Padding value for deconvolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for deconvolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu),
                 **kwargs):
        super(DeconvBlock, self).__init__(**kwargs)
        # BatchNorm and activation are optional; remember which are enabled so
        # __call__ can skip the sub-links that were never created.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.init_scope():
            # Chainer expresses "has bias" via the inverted `nobias` flag.
            self.conv = L.Deconvolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=(not use_bias),
                dilate=dilate,
                groups=groups)
            if self.use_bn:
                self.bn = L.BatchNormalization(
                    size=out_channels,
                    eps=bn_eps)
            if self.activate:
                # `activation` may be a factory lambda or a name string; the
                # helper normalizes it into a callable layer.
                self.activ = get_activation_layer(activation)

    def __call__(self, x):
        # deconv -> (optional) batch norm -> (optional) activation
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x


def channel_shuffle(x, groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns:
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulted variable.
    """
    batch, channels, height, width = x.shape
    channels_per_group = channels // groups
    # Split the channel axis into (groups, channels_per_group), swap those two
    # axes and flatten back: this interleaves channels across the groups so
    # that information flows between grouped convolutions.
    x = F.reshape(x, shape=(batch, groups, channels_per_group, height, width))
    x = F.swapaxes(x, axis1=1, axis2=2)
    x = F.reshape(x, shape=(batch, channels, height, width))
    return x


class ChannelShuffle(Chain):
    """
    Channel shuffle layer.
This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. """ def __init__(self, channels, groups): super(ChannelShuffle, self).__init__() assert (channels % groups == 0) self.groups = groups def __call__(self, x): return channel_shuffle(x, self.groups) def channel_shuffle2(x, groups): """ Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. The alternative version. Parameters: ---------- x : chainer.Variable or numpy.ndarray or cupy.ndarray Input variable. groups : int Number of groups. Returns: ------- chainer.Variable or numpy.ndarray or cupy.ndarray Resulted variable. """ batch, channels, height, width = x.shape channels_per_group = channels // groups x = F.reshape(x, shape=(batch, channels_per_group, groups, height, width)) x = F.swapaxes(x, axis1=1, axis2=2) x = F.reshape(x, shape=(batch, channels, height, width)) return x class ChannelShuffle2(Chain): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. The alternative version. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. """ def __init__(self, channels, groups): super(ChannelShuffle2, self).__init__() assert (channels % groups == 0) self.groups = groups def __call__(self, x): return channel_shuffle2(x, self.groups) class SEBlock(Chain): """ Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : int Number of channels. reduction : int, default 16 Squeeze reduction value. mid_channels : int or None, default None Number of middle channels. round_mid : bool, default False Whether to round middle channel number (make divisible by 8). 
    use_conv : bool, default True
        Whether to convolutional layers instead of fully-connected ones.
    activation : function or str, default F.relu
        Activation function after the first convolution.
    out_activation : function or str, default F.sigmoid
        Activation function after the last convolution.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 mid_channels=None,
                 round_mid=False,
                 use_conv=True,
                 mid_activation=(lambda: F.relu),
                 out_activation=(lambda: F.sigmoid)):
        super(SEBlock, self).__init__()
        self.use_conv = use_conv
        if mid_channels is None:
            # Bottleneck width: channels / reduction, optionally rounded by the
            # file-level round_channels helper.
            mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        with self.init_scope():
            # The squeeze/excite MLP is built either from 1x1 convolutions
            # (keeps the NCHW layout) or from fully-connected layers.
            if use_conv:
                self.conv1 = conv1x1(
                    in_channels=channels,
                    out_channels=mid_channels,
                    use_bias=True)
            else:
                self.fc1 = L.Linear(
                    in_size=channels,
                    out_size=mid_channels)
            self.activ = get_activation_layer(mid_activation)
            if use_conv:
                self.conv2 = conv1x1(
                    in_channels=mid_channels,
                    out_channels=channels,
                    use_bias=True)
            else:
                self.fc2 = L.Linear(
                    in_size=mid_channels,
                    out_size=channels)
            self.sigmoid = get_activation_layer(out_activation)

    def __call__(self, x):
        # Squeeze: global average pooling to a (N, C, 1, 1) channel descriptor.
        w = F.average_pooling_2d(x, ksize=x.shape[2:])
        if not self.use_conv:
            # The FC path expects a flat (N, C) tensor.
            w = F.reshape(w, shape=(w.shape[0], -1))
        # Excite: bottleneck MLP followed by the gating non-linearity.
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # Restore (N, C, 1, 1) so the gate broadcasts over H and W.
            w = F.expand_dims(F.expand_dims(w, axis=2), axis=3)
        # Scale: reweight each channel of the input by its gate.
        x = x * w
        return x


class SABlock(Chain):
    """
    Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    groups : int
        Number of channel groups (cardinality, without radix).
    radix : int
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to convolutional layers instead of fully-connected ones.
bn_eps : float, default 1e-5 Small float added to variance in Batch norm. """ def __init__(self, out_channels, groups, radix, reduction=4, min_channels=32, use_conv=True, bn_eps=1e-5): super(SABlock, self).__init__() self.groups = groups self.radix = radix self.use_conv = use_conv in_channels = out_channels * radix mid_channels = max(in_channels // reduction, min_channels) with self.init_scope(): if use_conv: self.conv1 = conv1x1( in_channels=out_channels, out_channels=mid_channels, use_bias=True) else: self.fc1 = L.Linear( in_size=out_channels, out_size=mid_channels) self.bn = L.BatchNormalization( size=mid_channels, eps=bn_eps) self.activ = F.relu if use_conv: self.conv2 = conv1x1( in_channels=mid_channels, out_channels=in_channels, use_bias=True) else: self.fc2 = L.Linear( in_size=mid_channels, out_size=in_channels) self.softmax = partial( F.softmax, axis=1) def __call__(self, x): batch, channels, height, width = x.shape x = F.reshape(x, shape=(batch, self.radix, channels // self.radix, height, width)) w = F.sum(x, axis=1) w = F.average_pooling_2d(w, ksize=w.shape[2:]) if not self.use_conv: w = F.reshape(w, shape=(w.shape[0], -1)) w = self.conv1(w) if self.use_conv else self.fc1(w) w = self.bn(w) w = self.activ(w) w = self.conv2(w) if self.use_conv else self.fc2(w) w = F.reshape(w, shape=(batch, self.groups, self.radix, -1)) w = F.swapaxes(w, axis1=1, axis2=2) w = self.softmax(w) w = F.reshape(w, shape=(batch, self.radix, -1, 1, 1)) x = x * w x = F.sum(x, axis=1) return x class SAConvBlock(Chain): """ Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. 
dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. radix : int, default 2 Number of splits within a cardinal group. reduction : int, default 4 Squeeze reduction value. min_channels : int, default 32 Minimal number of squeezed channels. use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: F.relu), radix=2, reduction=4, min_channels=32, use_conv=True): super(SAConvBlock, self).__init__() with self.init_scope(): self.conv = ConvBlock( in_channels=in_channels, out_channels=(out_channels * radix), ksize=ksize, stride=stride, pad=pad, dilate=dilate, groups=(groups * radix), use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) self.att = SABlock( out_channels=out_channels, groups=groups, radix=radix, reduction=reduction, min_channels=min_channels, use_conv=use_conv, bn_eps=bn_eps) def __call__(self, x): x = self.conv(x) x = self.att(x) return x def saconv3x3_block(in_channels, out_channels, stride=1, pad=1, **kwargs): """ 3x3 version of the Split-Attention convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. 
""" return SAConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, **kwargs) class PixelShuffle(Chain): """ Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network,' https://arxiv.org/abs/1609.05158. Parameters: ---------- scale_factor : int Multiplier for spatial size. """ def __init__(self, scale_factor, **kwargs): super(PixelShuffle, self).__init__(**kwargs) self.scale_factor = scale_factor def __call__(self, x): f1 = self.scale_factor f2 = self.scale_factor batch, channels, height, width = x.shape assert (channels % f1 % f2 == 0) new_channels = channels // f1 // f2 x = F.reshape(x, shape=(batch, new_channels, f1 * f2, height, width)) x = F.reshape(x, shape=(batch, new_channels, f1, f2, height, width)) x = F.transpose(x, axes=(0, 1, 4, 2, 5, 3)) x = F.reshape(x, shape=(batch, new_channels, height * f1, width * f2)) return x class DucBlock(Chain): """ Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,' https://arxiv.org/abs/1702.08502. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : int Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, scale_factor, **kwargs): super(DucBlock, self).__init__(**kwargs) mid_channels = (scale_factor * scale_factor) * out_channels with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.pix_shuffle = PixelShuffle(scale_factor=scale_factor) def __call__(self, x): x = self.conv(x) x = self.pix_shuffle(x) return x class SimpleSequential(Chain): """ A sequential chain that can be used instead of Sequential. 
""" def __init__(self): super(SimpleSequential, self).__init__() self.layer_names = [] def __setattr__(self, name, value): super(SimpleSequential, self).__setattr__(name, value) if self.within_init_scope and callable(value): self.layer_names.append(name) def __delattr__(self, name): super(SimpleSequential, self).__delattr__(name) try: self.layer_names.remove(name) except ValueError: pass def __len__(self): return len(self.layer_names) def __call__(self, x): for name in self.layer_names: x = self[name](x) return x def el(self, index): return self[self.layer_names[index]] class DualPathSequential(SimpleSequential): """ A sequential container for blocks with dual inputs/outputs. Blocks will be executed in the order they are added. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first blocks with single input/output. last_ordinals : int, default 0 Number of the final blocks with single input/output. dual_path_scheme : function Scheme of dual path response for a block. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal block. 
""" def __init__(self, return_two=True, first_ordinals=0, last_ordinals=0, dual_path_scheme=(lambda block, x1, x2: block(x1, x2)), dual_path_scheme_ordinal=(lambda block, x1, x2: (block(x1), x2))): super(DualPathSequential, self).__init__() self.return_two = return_two self.first_ordinals = first_ordinals self.last_ordinals = last_ordinals self.dual_path_scheme = dual_path_scheme self.dual_path_scheme_ordinal = dual_path_scheme_ordinal def __call__(self, x1, x2=None): length = len(self.layer_names) for i, block_name in enumerate(self.layer_names): block = self[block_name] if (i < self.first_ordinals) or (i >= length - self.last_ordinals): x1, x2 = self.dual_path_scheme_ordinal(block, x1, x2) else: x1, x2 = self.dual_path_scheme(block, x1, x2) if self.return_two: return x1, x2 else: return x1 class Concurrent(SimpleSequential): """ A container for concatenation of modules on the base of the sequential container. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. stack : bool, default False Whether to concatenate tensors along a new dimension. merge_type : str, default None Type of branch merging. """ def __init__(self, axis=1, stack=False, merge_type=None): super(Concurrent, self).__init__() assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"]) self.axis = axis if merge_type is not None: self.merge_type = merge_type else: self.merge_type = "stack" if stack else "cat" def __call__(self, x): out = [] for name in self.layer_names: out.append(self[name](x)) if self.merge_type == "stack": out = F.stack(tuple(out), axis=self.axis) elif self.merge_type == "cat": out = F.concat(tuple(out), axis=self.axis) elif self.merge_type == "sum": out = F.sum(F.stack(tuple(out), axis=self.axis), self.axis) else: raise NotImplementedError() return out class SequentialConcurrent(SimpleSequential): """ A sequential container with concatenated outputs. Blocks will be executed in the order they are added. 
Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. stack : bool, default False Whether to concatenate tensors along a new dimension. cat_input : bool, default True Whether to concatenate input tensor. """ def __init__(self, axis=1, stack=False, cat_input=True): super(SequentialConcurrent, self).__init__() self.axis = axis self.stack = stack self.cat_input = cat_input def __call__(self, x): out = [x] if self.cat_input else [] for name in self.layer_names: x = self[name](x) out.append(x) if self.stack: out = F.stack(tuple(out), axis=self.axis) else: out = F.concat(tuple(out), axis=self.axis) return out class ParametricSequential(SimpleSequential): """ A sequential container for modules with parameters. Blocks will be executed in the order they are added. """ def __init__(self): super(ParametricSequential, self).__init__() def __call__(self, x, **kwargs): for name in self.layer_names: x = self[name](x, **kwargs) return x class ParametricConcurrent(SimpleSequential): """ A container for concatenation of modules on the base of the sequential container. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. """ def __init__(self, axis=1): super(ParametricConcurrent, self).__init__() self.axis = axis def __call__(self, x, **kwargs): out = [] for name in self.layer_names: out.append(self[name](x, **kwargs)) out = F.concat(tuple(out), axis=self.axis) return out class Hourglass(Chain): """ A hourglass block. Parameters: ---------- down_seq : SimpleSequential Down modules as sequential. up_seq : SimpleSequential Up modules as sequential. skip_seq : SimpleSequential Skip connection modules as sequential. merge_type : str, default 'add' Type of concatenation of up and skip outputs. return_first_skip : bool, default False Whether return the first skip connection output. Used in ResAttNet. 
""" def __init__(self, down_seq, up_seq, skip_seq, merge_type="add", return_first_skip=False): super(Hourglass, self).__init__() self.depth = len(down_seq) assert (merge_type in ["cat", "add"]) assert (len(up_seq) == self.depth) assert (len(skip_seq) in (self.depth, self.depth + 1)) self.merge_type = merge_type self.return_first_skip = return_first_skip self.extra_skip = (len(skip_seq) == self.depth + 1) with self.init_scope(): self.down_seq = down_seq self.up_seq = up_seq self.skip_seq = skip_seq def _merge(self, x, y): if y is not None: if self.merge_type == "cat": x = F.concat((x, y), axis=1) elif self.merge_type == "add": x = x + y return x def __call__(self, x): y = None down_outs = [x] for down_module_name in self.down_seq.layer_names: down_module = self.down_seq[down_module_name] x = down_module(x) down_outs.append(x) for i in range(len(down_outs)): if i != 0: y = down_outs[self.depth - i] skip_module = self.skip_seq.el(self.depth - i) y = skip_module(y) x = self._merge(x, y) if i != len(down_outs) - 1: if (i == 0) and self.extra_skip: skip_module = self.skip_seq.el(self.depth) x = skip_module(x) up_module = self.up_seq.el(self.depth - 1 - i) x = up_module(x) if self.return_first_skip: return x, y else: return x class SesquialteralHourglass(Chain): """ A sesquialteral hourglass block. Parameters: ---------- down1_seq : SimpleSequential The first down modules as sequential. skip1_seq : SimpleSequential The first skip connection modules as sequential. up_seq : SimpleSequential Up modules as sequential. skip2_seq : SimpleSequential The second skip connection modules as sequential. down2_seq : SimpleSequential The second down modules as sequential. merge_type : str, default 'cat' Type of concatenation of up and skip outputs. 
""" def __init__(self, down1_seq, skip1_seq, up_seq, skip2_seq, down2_seq, merge_type="cat"): super(SesquialteralHourglass, self).__init__() assert (len(down1_seq) == len(up_seq)) assert (len(down1_seq) == len(down2_seq)) assert (len(skip1_seq) == len(skip2_seq)) assert (len(down1_seq) == len(skip1_seq) - 1) assert (merge_type in ["cat", "add"]) self.merge_type = merge_type self.depth = len(down1_seq) with self.init_scope(): self.down1_seq = down1_seq self.skip1_seq = skip1_seq self.up_seq = up_seq self.skip2_seq = skip2_seq self.down2_seq = down2_seq def _merge(self, x, y): if y is not None: if self.merge_type == "cat": x = F.concat((x, y), axis=1) elif self.merge_type == "add": x = x + y return x def __call__(self, x): y = self.skip1_seq[self.skip1_seq.layer_names[0]](x) skip1_outs = [y] for i in range(self.depth): x = self.down1_seq[self.down1_seq.layer_names[i]](x) y = self.skip1_seq[self.skip1_seq.layer_names[i + 1]](x) skip1_outs.append(y) x = skip1_outs[self.depth] y = self.skip2_seq[self.skip2_seq.layer_names[0]](x) skip2_outs = [y] for i in range(self.depth): x = self.up_seq[self.up_seq.layer_names[i]](x) y = skip1_outs[self.depth - 1 - i] x = self._merge(x, y) y = self.skip2_seq[self.skip2_seq.layer_names[i + 1]](x) skip2_outs.append(y) x = self.skip2_seq[self.skip2_seq.layer_names[self.depth]](x) for i in range(self.depth): x = self.down2_seq[self.down2_seq.layer_names[i]](x) y = skip2_outs[self.depth - 1 - i] x = self._merge(x, y) return x class MultiOutputSequential(SimpleSequential): """ A sequential container with multiple outputs. Blocks will be executed in the order they are added. Parameters: ---------- multi_output : bool, default True Whether to return multiple output. dual_output : bool, default False Whether to return dual output. return_last : bool, default True Whether to forcibly return last value. 
""" def __init__(self, multi_output=True, dual_output=False, return_last=True): super(MultiOutputSequential, self).__init__() self.multi_output = multi_output self.dual_output = dual_output self.return_last = return_last def __call__(self, x): outs = [] for name in self.layer_names: block = self[name] x = block(x) if hasattr(block, "do_output") and block.do_output: outs.append(x) elif hasattr(block, "do_output2") and block.do_output2: assert (type(x) == tuple) outs.extend(x[1]) x = x[0] if self.multi_output: return [x] + outs if self.return_last else outs elif self.dual_output: return x, outs else: return x class ParallelConcurent(SimpleSequential): """ A sequential container with multiple inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. merge_type : str, default 'list' Type of branch merging. """ def __init__(self, axis=1, merge_type="list"): super(ParallelConcurent, self).__init__() assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = axis self.merge_type = merge_type def __call__(self, x): out = [] for name, xi in zip(self.layer_names, x): out.append(self[name](xi)) if self.merge_type == "list": pass elif self.merge_type == "stack": out = F.stack(tuple(out), axis=self.axis) elif self.merge_type == "cat": out = F.concat(tuple(out), axis=self.axis) elif self.merge_type == "sum": out = F.sum(F.stack(tuple(out), axis=self.axis), self.axis) else: raise NotImplementedError() return out class DualPathParallelConcurent(SimpleSequential): """ A sequential container with multiple dual-path inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. merge_type : str, default 'list' Type of branch merging. 
""" def __init__(self, axis=1, merge_type="list"): super(DualPathParallelConcurent, self).__init__() assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = axis self.merge_type = merge_type def __call__(self, x1, x2): x1_out = [] x2_out = [] for name, x1i, x2i in zip(self.layer_names, x1, x2): y1i, y2i = self[name](x1i, x2i) x1_out.append(y1i) x2_out.append(y2i) if self.merge_type == "list": pass elif self.merge_type == "stack": x1_out = F.stack(tuple(x1_out), axis=self.axis) x2_out = F.stack(tuple(x2_out), axis=self.axis) elif self.merge_type == "cat": x1_out = F.concat(tuple(x1_out), axis=self.axis) x2_out = F.concat(tuple(x2_out), axis=self.axis) elif self.merge_type == "sum": x1_out = F.sum(F.stack(tuple(x1_out), axis=self.axis), self.axis) x2_out = F.sum(F.stack(tuple(x2_out), axis=self.axis), self.axis) else: raise NotImplementedError() return x1_out, x2_out class Flatten(Chain): """ Simple flatten block. """ def __call__(self, x): return x.reshape(x.shape[0], -1) class AdaptiveAvgPool2D(Chain): """ Simple adaptive average pooling block. """ def __call__(self, x): return F.average_pooling_2d(x, ksize=x.shape[2:]) class NormActivation(Chain): """ Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default F.relu Activation function or name of activation function. """ def __init__(self, in_channels, bn_eps=1e-5, activation=(lambda: F.relu), **kwargs): super(NormActivation, self).__init__(**kwargs) with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=bn_eps) self.activ = get_activation_layer(activation) def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class InterpolationBlock(Chain): """ Interpolation block. 
Parameters: ---------- scale_factor : int Multiplier for spatial size. out_size : tuple of 2 int, default None Spatial size of the output tensor for the bilinear interpolation operation. up : bool, default True Whether to upsample or downsample. mode : str, default 'bilinear' Algorithm used for upsampling. align_corners : bool, default True Whether to align the corner pixels of the input and output tensors. """ def __init__(self, scale_factor, out_size=None, up=True, mode="bilinear", align_corners=True, **kwargs): super(InterpolationBlock, self).__init__(**kwargs) self.scale_factor = scale_factor self.out_size = out_size self.up = up self.mode = mode self.align_corners = align_corners def __call__(self, x, size=None): out_size = self.calc_out_size(x) if size is None else size return F.resize_images(x, output_shape=out_size, mode=self.mode, align_corners=self.align_corners) def calc_out_size(self, x): if self.out_size is not None: return self.out_size if self.up: return tuple(s * self.scale_factor for s in x.shape[2:]) else: return tuple(s // self.scale_factor for s in x.shape[2:]) class HeatmapMaxDetBlock(Chain): """ Heatmap maximum detector block (for human pose estimation task). 
""" def __init__(self, **kwargs): super(HeatmapMaxDetBlock, self).__init__(**kwargs) def __call__(self, x): heatmap = x vector_dim = 2 batch = heatmap.shape[0] channels = heatmap.shape[1] in_size = x.shape[2:] heatmap_vector = F.reshape(heatmap, shape=(batch, channels, -1)) indices = F.cast(F.expand_dims(F.argmax(heatmap_vector, axis=vector_dim), axis=vector_dim), np.float32) scores = F.max(heatmap_vector, axis=vector_dim, keepdims=True) scores_mask = (scores.array > 0.0).astype(np.float32) pts_x = (indices.array % in_size[1]) * scores_mask pts_y = (indices.array // in_size[1]) * scores_mask pts = F.concat((pts_x, pts_y, scores), axis=vector_dim).array for b in range(batch): for k in range(channels): hm = heatmap[b, k, :, :].array px = int(pts_x[b, k]) py = int(pts_y[b, k]) if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1): pts[b, k, 0] += np.sign(hm[py, px + 1] - hm[py, px - 1]) * 0.25 pts[b, k, 1] += np.sign(hm[py + 1, px] - hm[py - 1, px]) * 0.25 return pts
68,471
30.510354
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/lwopenpose_cmupan.py
""" Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in Chainer. Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. """ __all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco', 'LwopDecoderFinalBlock'] import os import chainer.functions as F from chainer import Chain from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block, SimpleSequential class LwopResBottleneck(Chain): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. """ def __init__(self, in_channels, out_channels, stride, use_bias=True, bottleneck_factor=2, squeeze_out=False): super(LwopResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor if squeeze_out else in_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, use_bias=use_bias) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class LwopResUnit(Chain): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. 
use_bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. activate : bool, default False Whether to activate the sum. """ def __init__(self, in_channels, out_channels, stride=1, use_bias=True, bottleneck_factor=2, squeeze_out=False, activate=False): super(LwopResUnit, self).__init__() self.activate = activate self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = LwopResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, bottleneck_factor=bottleneck_factor, squeeze_out=squeeze_out) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, activation=None) if self.activate: self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity if self.activate: x = self.activ(x) return x class LwopEncoderFinalBlock(Chain): """ Lightweight OpenPose 2D/3D specific encoder final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(LwopEncoderFinalBlock, self).__init__() with self.init_scope(): self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=False) self.body = SimpleSequential() with self.body.init_scope(): for i in range(3): setattr(self.body, "block{}".format(i + 1), dwsconv3x3_block( in_channels=out_channels, out_channels=out_channels, dw_use_bn=False, pw_use_bn=False, dw_activation=(lambda: F.elu), pw_activation=(lambda: F.elu))) self.post_conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=True, use_bn=False) def __call__(self, x): x = self.pre_conv(x) x = x + self.body(x) x = self.post_conv(x) return x class LwopRefinementBlock(Chain): """ Lightweight OpenPose 2D/3D specific refinement block for decoder units. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(LwopRefinementBlock, self).__init__() with self.init_scope(): self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=False) self.body = SimpleSequential() with self.body.init_scope(): setattr(self.body, "block1", conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=True)) setattr(self.body, "block2", conv3x3_block( in_channels=out_channels, out_channels=out_channels, pad=2, dilate=2, use_bias=True)) def __call__(self, x): x = self.pre_conv(x) x = x + self.body(x) return x class LwopDecoderBend(Chain): """ Lightweight OpenPose 2D/3D specific decoder bend block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, mid_channels, out_channels): super(LwopDecoderBend, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=True, use_bn=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class LwopDecoderInitBlock(Chain): """ Lightweight OpenPose 2D/3D specific decoder init block. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. """ def __init__(self, in_channels, keypoints): super(LwopDecoderInitBlock, self).__init__() num_heatmap = keypoints num_paf = 2 * keypoints bend_mid_channels = 512 with self.init_scope(): self.body = SimpleSequential() with self.body.init_scope(): for i in range(3): setattr(self.body, "block{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=in_channels, use_bias=True, use_bn=False)) self.heatmap_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_heatmap) self.paf_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_paf) def __call__(self, x): y = self.body(x) heatmap = self.heatmap_bend(y) paf = self.paf_bend(y) y = F.concat((x, heatmap, paf), axis=1) return y class LwopDecoderUnit(Chain): """ Lightweight OpenPose 2D/3D specific decoder init. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. 
""" def __init__(self, in_channels, keypoints): super(LwopDecoderUnit, self).__init__() num_heatmap = keypoints num_paf = 2 * keypoints self.features_channels = in_channels - num_heatmap - num_paf with self.init_scope(): self.body = SimpleSequential() with self.body.init_scope(): for i in range(5): setattr(self.body, "block{}".format(i + 1), LwopRefinementBlock( in_channels=in_channels, out_channels=self.features_channels)) in_channels = self.features_channels self.heatmap_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_heatmap) self.paf_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_paf) def __call__(self, x): features = x[:, :self.features_channels] y = self.body(x) heatmap = self.heatmap_bend(y) paf = self.paf_bend(y) y = F.concat((features, heatmap, paf), axis=1) return y class LwopDecoderFeaturesBend(Chain): """ Lightweight OpenPose 2D/3D specific decoder 3D features bend. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid_channels, out_channels): super(LwopDecoderFeaturesBend, self).__init__() with self.init_scope(): self.body = SimpleSequential() with self.body.init_scope(): for i in range(2): setattr(self.body, "block{}".format(i + 1), LwopRefinementBlock( in_channels=in_channels, out_channels=mid_channels)) in_channels = mid_channels self.features_bend = LwopDecoderBend( in_channels=mid_channels, mid_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.body(x) x = self.features_bend(x) return x class LwopDecoderFinalBlock(Chain): """ Lightweight OpenPose 2D/3D specific decoder final block for calcualation 3D poses. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. 
bottleneck_factor : int Bottleneck factor. calc_3d_features : bool Whether to calculate 3D features. """ def __init__(self, in_channels, keypoints, bottleneck_factor, calc_3d_features): super(LwopDecoderFinalBlock, self).__init__() self.num_heatmap_paf = 3 * keypoints self.calc_3d_features = calc_3d_features features_out_channels = self.num_heatmap_paf features_in_channels = in_channels - features_out_channels if self.calc_3d_features: with self.init_scope(): self.body = SimpleSequential() with self.body.init_scope(): for i in range(5): setattr(self.body, "block{}".format(i + 1), LwopResUnit( in_channels=in_channels, out_channels=features_in_channels, bottleneck_factor=bottleneck_factor)) in_channels = features_in_channels self.features_bend = LwopDecoderFeaturesBend( in_channels=features_in_channels, mid_channels=features_in_channels, out_channels=features_out_channels) def __call__(self, x): heatmap_paf_2d = x[:, -self.num_heatmap_paf:] if not self.calc_3d_features: return heatmap_paf_2d x = self.body(x) x = self.features_bend(x) y = F.concat((heatmap_paf_2d, x), axis=1) return y class LwOpenPose(Chain): """ Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- encoder_channels : list of list of int Number of output channels for each encoder unit. encoder_paddings : list of list of int Padding/dilation value for each encoder unit. encoder_init_block_channels : int Number of output channels for the encoder initial unit. encoder_final_block_channels : int Number of output channels for the encoder final unit. refinement_units : int Number of refinement blocks in the decoder. calc_3d_features : bool Whether to calculate 3D features. return_heatmap : bool, default True Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. 
keypoints : int, default 19 Number of keypoints. """ def __init__(self, encoder_channels, encoder_paddings, encoder_init_block_channels, encoder_final_block_channels, refinement_units, calc_3d_features, return_heatmap=True, in_channels=3, in_size=(368, 368), keypoints=19): super(LwOpenPose, self).__init__() assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap self.calc_3d_features = calc_3d_features num_heatmap_paf = 3 * keypoints with self.init_scope(): self.encoder = SimpleSequential() with self.encoder.init_scope(): backbone = SimpleSequential() with backbone.init_scope(): setattr(backbone, "init_block", conv3x3_block( in_channels=in_channels, out_channels=encoder_init_block_channels, stride=2)) in_channels = encoder_init_block_channels for i, channels_per_stage in enumerate(encoder_channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 pad = encoder_paddings[i][j] setattr(stage, "unit{}".format(j + 1), dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=pad, dilate=pad)) in_channels = out_channels setattr(backbone, "stage{}".format(i + 1), stage) setattr(self.encoder, "backbone", backbone) setattr(self.encoder, "final_block", LwopEncoderFinalBlock( in_channels=in_channels, out_channels=encoder_final_block_channels)) in_channels = encoder_final_block_channels self.decoder = SimpleSequential() with self.decoder.init_scope(): setattr(self.decoder, "init_block", LwopDecoderInitBlock( in_channels=in_channels, keypoints=keypoints)) in_channels = encoder_final_block_channels + num_heatmap_paf for i in range(refinement_units): setattr(self.decoder, "unit{}".format(i + 1), LwopDecoderUnit( in_channels=in_channels, keypoints=keypoints)) setattr(self.decoder, "final_block", LwopDecoderFinalBlock( in_channels=in_channels, keypoints=keypoints, bottleneck_factor=2, 
calc_3d_features=calc_3d_features)) def __call__(self, x): x = self.encoder(x) x = self.decoder(x) if self.return_heatmap: return x else: return x def get_lwopenpose(calc_3d_features, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create Lightweight OpenPose 2D/3D model with specific parameters. Parameters: ---------- calc_3d_features : bool, default False Whether to calculate 3D features. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ encoder_channels = [[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]] encoder_paddings = [[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]] encoder_init_block_channels = 32 encoder_final_block_channels = 128 refinement_units = 1 net = LwOpenPose( encoder_channels=encoder_channels, encoder_paddings=encoder_paddings, encoder_init_block_channels=encoder_init_block_channels, encoder_final_block_channels=encoder_final_block_channels, refinement_units=refinement_units, calc_3d_features=calc_3d_features, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, **kwargs): """ Lightweight OpenPose 2D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_lwopenpose(calc_3d_features=False, keypoints=keypoints, model_name="lwopenpose2d_mobilenet_cmupan_coco", **kwargs) def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, **kwargs): """ Lightweight OpenPose 3D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_lwopenpose(calc_3d_features=True, keypoints=keypoints, model_name="lwopenpose3d_mobilenet_cmupan_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (368, 368) keypoints = 19 return_heatmap = True pretrained = False models = [ (lwopenpose2d_mobilenet_cmupan_coco, "2d"), (lwopenpose3d_mobilenet_cmupan_coco, "3d"), ] for model, model_dim in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != lwopenpose2d_mobilenet_cmupan_coco or weight_count == 4091698) assert (model != lwopenpose3d_mobilenet_cmupan_coco or weight_count == 5085983) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) if model_dim == "2d": assert (y.shape == (batch, 3 * keypoints, in_size[0] // 8, in_size[0] // 8)) else: assert (y.shape == (batch, 6 * keypoints, in_size[0] // 8, in_size[0] // 8)) if __name__ == "__main__": _test()
22,569
33.563553
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/rir_cifar.py
""" RiR for CIFAR/SVHN, implemented in Chainer. Original paper: 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. """ __all__ = ['CIFARRiR', 'rir_cifar10', 'rir_cifar100', 'rir_svhn', 'RiRFinalBlock'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, DualPathSequential, SimpleSequential class PostActivation(Chain): """ Pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PostActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class RiRUnit(Chain): """ RiR unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. 
""" def __init__(self, in_channels, out_channels, stride): super(RiRUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.res_pass_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.trans_pass_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.res_cross_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.trans_cross_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.res_postactiv = PostActivation(in_channels=out_channels) self.trans_postactiv = PostActivation(in_channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def __call__(self, x_res, x_trans): if self.resize_identity: x_res_identity = self.identity_conv(x_res) else: x_res_identity = x_res y_res = self.res_cross_conv(x_res) y_trans = self.trans_cross_conv(x_trans) x_res = self.res_pass_conv(x_res) x_trans = self.trans_pass_conv(x_trans) x_res = x_res + x_res_identity + y_trans x_trans = x_trans + y_res x_res = self.res_postactiv(x_res) x_trans = self.trans_postactiv(x_trans) return x_res, x_trans class RiRInitBlock(Chain): """ RiR initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(RiRInitBlock, self).__init__() with self.init_scope(): self.res_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels) self.trans_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels) def __call__(self, x, _): x_res = self.res_conv(x) x_trans = self.trans_conv(x) return x_res, x_trans class RiRFinalBlock(Chain): """ RiR final block. 
""" def __init__(self): super(RiRFinalBlock, self).__init__() def __call__(self, x_res, x_trans): x = F.concat((x_res, x_trans), axis=1) return x, None class CIFARRiR(Chain): """ RiR model for CIFAR from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(32, 32), classes=10): super(CIFARRiR, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = DualPathSequential( return_two=False, first_ordinals=0, last_ordinals=0) with self.features.init_scope(): setattr(self.features, "init_block", RiRInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), RiRUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", RiRFinalBlock()) in_channels = final_block_channels self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "final_conv", conv1x1_block( in_channels=in_channels, out_channels=classes, activation=None)) setattr(self.output, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) 
setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x, x) x = self.output(x) return x def get_rir_cifar(classes, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create RiR model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ channels = [[48, 48, 48, 48], [96, 96, 96, 96, 96, 96], [192, 192, 192, 192, 192, 192]] init_block_channels = 48 final_block_channels = 384 net = CIFARRiR( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def rir_cifar10(classes=10, **kwargs): """ RiR model for CIFAR-10 from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_rir_cifar(classes=classes, model_name="rir_cifar10", **kwargs) def rir_cifar100(classes=100, **kwargs): """ RiR model for CIFAR-100 from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_rir_cifar(classes=classes, model_name="rir_cifar100", **kwargs) def rir_svhn(classes=10, **kwargs): """ RiR model for SVHN from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_rir_cifar(classes=classes, model_name="rir_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (rir_cifar10, 10), (rir_cifar100, 100), (rir_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != rir_cifar10 or weight_count == 9492980) assert (model != rir_cifar100 or weight_count == 9527720) assert (model != rir_svhn or weight_count == 9492980) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
10,937
30.612717
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/diapreresnet.py
""" DIA-PreResNet for ImageNet-1K, implemented in Chainer. Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['DIAPreResNet', 'diapreresnet10', 'diapreresnet12', 'diapreresnet14', 'diapreresnetbc14b', 'diapreresnet16', 'diapreresnet18', 'diapreresnet26', 'diapreresnetbc26b', 'diapreresnet34', 'diapreresnetbc38b', 'diapreresnet50', 'diapreresnet50b', 'diapreresnet101', 'diapreresnet101b', 'diapreresnet152', 'diapreresnet152b', 'diapreresnet200', 'diapreresnet200b', 'diapreresnet269b', 'DIAPreResUnit'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, DualPathSequential, SimpleSequential from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation from .diaresnet import DIAAttention class DIAPreResUnit(Chain): """ DIA-PreResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. attention : nn.Module, default None Attention module. hold_attention : bool, default False Whether hold attention module. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, attention=None, hold_attention=True): super(DIAPreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) if hold_attention: self.attention = attention if not hold_attention: self.attention = attention def __call__(self, x, hc=None): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x, hc = self.attention(x, hc) x = x + identity return x, hc class DIAPreResNet(Chain): """ DIA-PreResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(DIAPreResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DIAPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, attention=attention, hold_attention=(j == 0))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation( in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_diapreresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DIA-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. 
conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported DIA-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = DIAPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, 
conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def diapreresnet10(**kwargs): """ DIA-PreResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=10, model_name="diapreresnet10", **kwargs) def diapreresnet12(**kwargs): """ DIA-PreResNet-12 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=12, model_name="diapreresnet12", **kwargs) def diapreresnet14(**kwargs): """ DIA-PreResNet-14 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=14, model_name="diapreresnet14", **kwargs) def diapreresnetbc14b(**kwargs): """ DIA-PreResNet-BC-14b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc14b", **kwargs) def diapreresnet16(**kwargs): """ DIA-PreResNet-16 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=16, model_name="diapreresnet16", **kwargs) def diapreresnet18(**kwargs): """ DIA-PreResNet-18 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=18, model_name="diapreresnet18", **kwargs) def diapreresnet26(**kwargs): """ DIA-PreResNet-26 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=26, bottleneck=False, model_name="diapreresnet26", **kwargs) def diapreresnetbc26b(**kwargs): """ DIA-PreResNet-BC-26b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc26b", **kwargs) def diapreresnet34(**kwargs): """ DIA-PreResNet-34 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=34, model_name="diapreresnet34", **kwargs) def diapreresnetbc38b(**kwargs): """ DIA-PreResNet-BC-38b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc38b", **kwargs) def diapreresnet50(**kwargs): """ DIA-PreResNet-50 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=50, model_name="diapreresnet50", **kwargs) def diapreresnet50b(**kwargs): """ DIA-PreResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet(blocks=50, conv1_stride=False, model_name="diapreresnet50b", **kwargs) def diapreresnet101(**kwargs): """ DIA-PreResNet-101 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=101, model_name="diapreresnet101", **kwargs) def diapreresnet101b(**kwargs): """ DIA-PreResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=101, conv1_stride=False, model_name="diapreresnet101b", **kwargs) def diapreresnet152(**kwargs): """ DIA-PreResNet-152 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=152, model_name="diapreresnet152", **kwargs) def diapreresnet152b(**kwargs): """ DIA-PreResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet(blocks=152, conv1_stride=False, model_name="diapreresnet152b", **kwargs) def diapreresnet200(**kwargs): """ DIA-PreResNet-200 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=200, model_name="diapreresnet200", **kwargs) def diapreresnet200b(**kwargs): """ DIA-PreResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=200, conv1_stride=False, model_name="diapreresnet200b", **kwargs) def diapreresnet269b(**kwargs): """ DIA-PreResNet-269 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_diapreresnet(blocks=269, conv1_stride=False, model_name="diapreresnet269b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ diapreresnet10, diapreresnet12, diapreresnet14, diapreresnetbc14b, diapreresnet16, diapreresnet18, diapreresnet26, diapreresnetbc26b, diapreresnet34, diapreresnetbc38b, diapreresnet50, diapreresnet50b, diapreresnet101, diapreresnet101b, diapreresnet152, diapreresnet152b, diapreresnet200, diapreresnet200b, diapreresnet269b, ] for model in models: net = model(pretrained=pretrained) # net.to_gpu() weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != diapreresnet10 or weight_count == 6295688) assert (model != diapreresnet12 or weight_count == 6369672) assert (model != diapreresnet14 or weight_count == 6665096) assert (model != diapreresnetbc14b or weight_count == 24016424) assert (model != diapreresnet16 or weight_count == 7845768) assert (model != diapreresnet18 or weight_count == 12566408) assert (model != diapreresnet26 or weight_count == 18837128) assert (model != diapreresnetbc26b or weight_count == 29946664) assert (model != diapreresnet34 or weight_count == 22674568) assert (model != diapreresnetbc38b or weight_count == 35876904) assert (model != diapreresnet50 or weight_count == 39508520) assert (model != diapreresnet50b or weight_count == 39508520) assert (model != diapreresnet101 or weight_count == 58500648) assert (model != diapreresnet101b or weight_count == 58500648) assert (model != diapreresnet152 or weight_count == 74144296) assert (model != diapreresnet152b or weight_count == 74144296) assert (model != diapreresnet200 or weight_count == 78625320) assert (model != diapreresnet200b or weight_count == 78625320) assert (model != diapreresnet269b or weight_count == 116024872) x = np.zeros((1, 3, 224, 224), np.float32) # import cupy # x = cupy.zeros((1, 3, 224, 224), np.float32) y = net(x) assert 
(y.shape == (1, 1000)) if __name__ == "__main__": _test()
21,902
34.730832
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/jasperdr.py
""" Jasper DR (Dense Residual) for ASR, implemented in Chainer. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['jasperdr10x5_en', 'jasperdr10x5_en_nr'] from .jasper import get_jasper def jasperdr10x5_en(classes=29, **kwargs): """ Jasper DR 10x5 model for English language from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en", **kwargs) def jasperdr10x5_en_nr(classes=29, **kwargs): """ Jasper DR 10x5 model for English language (with presence of noise) from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en_nr", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False audio_features = 64 models = [ jasperdr10x5_en, jasperdr10x5_en_nr, ] for model in models: net = model( in_channels=audio_features, pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != jasperdr10x5_en or weight_count == 332632349) assert (model != jasperdr10x5_en_nr or weight_count == 332632349) batch = 3 seq_len = np.random.randint(60, 150, batch) seq_len_max = seq_len.max() + 2 x = np.random.rand(batch, audio_features, seq_len_max).astype(np.float32) x_len = seq_len.astype(np.long) y, y_len = net(x, x_len) assert (y.shape[:2] == (batch, net.classes)) assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
2,668
30.77381
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/deeplabv3.py
""" DeepLabv3 for image segmentation, implemented in Chainer. Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. """ __all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc', 'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco', 'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes', 'deeplabv3_resnetd101b_cityscapes'] import os import chainer.functions as F from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent from .resnetd import resnetd50b, resnetd101b, resnetd152b class DeepLabv3FinalBlock(Chain): """ DeepLabv3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, bottleneck_factor=4): super(DeepLabv3FinalBlock, self).__init__() assert (in_channels % bottleneck_factor == 0) mid_channels = in_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = partial( F.dropout, ratio=0.1) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True) def __call__(self, x, out_size): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) x = F.resize_images(x, output_shape=out_size) return x class ASPPAvgBranch(Chain): """ ASPP branch with average pooling. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. upscale_out_size : tuple of 2 int Spatial size of output image for the bilinear upsampling operation. 
""" def __init__(self, in_channels, out_channels, upscale_out_size): super(ASPPAvgBranch, self).__init__() self.upscale_out_size = upscale_out_size with self.init_scope(): self.pool = partial( F.average_pooling_2d, ksize=1) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) def __call__(self, x): in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = F.resize_images(x, output_shape=in_size) return x class AtrousSpatialPyramidPooling(Chain): """ Atrous Spatial Pyramid Pooling (ASPP) module. Parameters: ---------- in_channels : int Number of input channels. upscale_out_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. """ def __init__(self, in_channels, upscale_out_size): super(AtrousSpatialPyramidPooling, self).__init__() atrous_rates = [12, 24, 36] assert (in_channels % 8 == 0) mid_channels = in_channels // 8 project_in_channels = 5 * mid_channels with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", conv1x1_block( in_channels=in_channels, out_channels=mid_channels)) for i, atrous_rate in enumerate(atrous_rates): setattr(self.branches, "branch{}".format(i + 2), conv3x3_block( in_channels=in_channels, out_channels=mid_channels, pad=atrous_rate, dilate=atrous_rate)) setattr(self.branches, "branch5", ASPPAvgBranch( in_channels=in_channels, out_channels=mid_channels, upscale_out_size=upscale_out_size)) self.conv = conv1x1_block( in_channels=project_in_channels, out_channels=mid_channels) self.dropout = partial( F.dropout, ratio=0.5) def __call__(self, x): x = self.branches(x) x = self.conv(x) x = self.dropout(x) return x class DeepLabv3(Chain): """ DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- backbone : nn.Sequential Feature extractor. 
backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. classes : int, default 21 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), classes=21): super(DeepLabv3, self).__init__() assert (in_channels > 0) self.in_size = in_size self.classes = classes self.aux = aux self.fixed_size = fixed_size with self.init_scope(): self.backbone = backbone pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None self.pool = AtrousSpatialPyramidPooling( in_channels=backbone_out_channels, upscale_out_size=pool_out_size) pool_out_channels = backbone_out_channels // 8 self.final_block = DeepLabv3FinalBlock( in_channels=pool_out_channels, out_channels=classes, bottleneck_factor=1) if self.aux: aux_out_channels = backbone_out_channels // 2 self.aux_block = DeepLabv3FinalBlock( in_channels=aux_out_channels, out_channels=classes, bottleneck_factor=4) def __call__(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, y = self.backbone(x) x = self.pool(x) x = self.final_block(x, in_size) if self.aux: y = self.aux_block(y, in_size) return x, y else: return x def get_deeplabv3(backbone, classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DeepLabv3 model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. num_classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ net = DeepLabv3( backbone=backbone, classes=classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_voc", **kwargs) def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_voc", **kwargs) def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_voc", **kwargs) def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_coco", **kwargs) def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_coco", **kwargs) def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_coco", **kwargs) def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_ade20k", **kwargs) def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k", **kwargs) def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_cityscapes", **kwargs) def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone.final_pool return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (480, 480) aux = False pretrained = False models = [ (deeplabv3_resnetd50b_voc, 21), (deeplabv3_resnetd101b_voc, 21), (deeplabv3_resnetd152b_voc, 21), (deeplabv3_resnetd50b_coco, 21), (deeplabv3_resnetd101b_coco, 21), (deeplabv3_resnetd152b_coco, 21), (deeplabv3_resnetd50b_ade20k, 150), (deeplabv3_resnetd101b_ade20k, 150), (deeplabv3_resnetd50b_cityscapes, 19), (deeplabv3_resnetd101b_cityscapes, 19), ] for model, classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != deeplabv3_resnetd50b_voc or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_voc or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_voc or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_coco or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_coco or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_coco or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 42194156) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 61186284) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 42126822) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 61118950) else: assert (model != deeplabv3_resnetd50b_voc or weight_count == 39762645) assert (model != deeplabv3_resnetd101b_voc or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_voc or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_coco or weight_count == 39762645) assert (model != 
deeplabv3_resnetd101b_coco or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_coco or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 39795798) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 58787926) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 39762131) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 58754259) x = np.zeros((1, 3, in_size[0], in_size[1]), np.float32) ys = net(x) y = ys[0] if aux else ys assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3])) if __name__ == "__main__": _test()
21,837
38.06619
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/fpenet.py
""" FPENet for image segmentation, implemented in Chainer. Original paper: 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. """ __all__ = ['FPENet', 'fpenet_cityscapes'] import os import chainer.functions as F from chainer import Chain from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, SEBlock, InterpolationBlock, SimpleSequential,\ MultiOutputSequential class FPEBlock(Chain): """ FPENet block. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels, **kwargs): super(FPEBlock, self).__init__(**kwargs) dilates = [1, 2, 4, 8] assert (channels % len(dilates) == 0) mid_channels = channels // len(dilates) with self.init_scope(): self.blocks = SimpleSequential() with self.blocks.init_scope(): for i, dilate in enumerate(dilates): setattr(self.blocks, "block{}".format(i + 1), conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, groups=mid_channels, dilate=dilate, pad=dilate)) def __call__(self, x): xs = F.split_axis(x, indices_or_sections=len(self.blocks.layer_names), axis=1) ys = [] for bni, xsi in zip(self.blocks.layer_names, xs): bi = self.blocks[bni] if len(ys) == 0: ys.append(bi(xsi)) else: ys.append(bi(xsi + ys[-1])) x = F.concat(ys, axis=1) return x class FPEUnit(Chain): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck_factor : int Bottleneck factor. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck_factor, use_se, **kwargs): super(FPEUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (stride != 1) self.use_se = use_se mid1_channels = in_channels * bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, stride=stride) self.block = FPEBlock(channels=mid1_channels) self.conv2 = conv1x1_block( in_channels=mid1_channels, out_channels=out_channels, activation=None) if self.use_se: self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.conv1(x) x = self.block(x) x = self.conv2(x) if self.use_se: x = self.se(x) x = x + identity x = self.activ(x) return x class FPEStage(Chain): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, layers, use_se, **kwargs): super(FPEStage, self).__init__(**kwargs) self.use_block = (layers > 1) with self.init_scope(): if self.use_block: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, stride=2, bottleneck_factor=4, use_se=use_se) self.blocks = SimpleSequential() with self.blocks.init_scope(): for i in range(layers - 1): setattr(self.blocks, "block{}".format(i + 1), FPEUnit( in_channels=out_channels, out_channels=out_channels, stride=1, bottleneck_factor=1, use_se=use_se)) else: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, stride=1, bottleneck_factor=1, use_se=use_se) def __call__(self, x): x = self.down(x) if self.use_block: y = self.blocks(x) x = x + y return x class MEUBlock(Chain): """ FPENet specific mutual embedding upsample (MEU) block. Parameters: ---------- in_channels_high : int Number of input channels for x_high. in_channels_low : int Number of input channels for x_low. out_channels : int Number of output channels. 
""" def __init__(self, in_channels_high, in_channels_low, out_channels, **kwargs): super(MEUBlock, self).__init__(**kwargs) with self.init_scope(): self.conv_high = conv1x1_block( in_channels=in_channels_high, out_channels=out_channels, activation=None) self.conv_low = conv1x1_block( in_channels=in_channels_low, out_channels=out_channels, activation=None) self.conv_w_high = conv1x1( in_channels=out_channels, out_channels=out_channels) self.conv_w_low = conv1x1( in_channels=1, out_channels=1) self.sigmoid = F.sigmoid self.relu = F.relu self.up = InterpolationBlock( scale_factor=2, align_corners=True) def __call__(self, x_high, x_low): x_high = self.conv_high(x_high) x_low = self.conv_low(x_low) w_high = F.average_pooling_2d(x_high, ksize=x_high.shape[2:]) w_high = self.conv_w_high(w_high) w_high = self.relu(w_high) w_high = self.sigmoid(w_high) w_low = x_low.mean(axis=1, keepdims=True) w_low = self.conv_w_low(w_low) w_low = self.sigmoid(w_low) x_high = self.up(x_high) x_high = x_high * w_low x_low = x_low * w_high out = x_high + x_low return out class FPENet(Chain): """ FPENet model from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. meu_channels : list of int Number of output channels for MEU blocks. use_se : bool Whether to use SE-module. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, layers, channels, init_block_channels, meu_channels, use_se, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, **kwargs): super(FPENet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size with self.init_scope(): self.stem = conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2) in_channels = init_block_channels self.encoder = MultiOutputSequential(return_last=False) with self.encoder.init_scope(): for i, (layers_i, out_channels) in enumerate(zip(layers, channels)): stage = FPEStage( in_channels=in_channels, out_channels=out_channels, layers=layers_i, use_se=use_se) stage.do_output = True setattr(self.encoder, "stage{}".format(i + 1), stage) in_channels = out_channels self.meu1 = MEUBlock( in_channels_high=channels[-1], in_channels_low=channels[-2], out_channels=meu_channels[0]) self.meu2 = MEUBlock( in_channels_high=meu_channels[0], in_channels_low=channels[-3], out_channels=meu_channels[1]) in_channels = meu_channels[1] self.classifier = conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True) self.up = InterpolationBlock( scale_factor=2, align_corners=True) def __call__(self, x): x = self.stem(x) y = self.encoder(x) x = self.meu1(y[2], y[1]) x = self.meu2(x, y[0]) x = self.classifier(x) x = self.up(x) return x def get_fpenet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create FPENet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" width = 16 channels = [int(width * (2 ** i)) for i in range(3)] init_block_channels = width layers = [1, 3, 9] meu_channels = [64, 32] use_se = False net = FPENet( layers=layers, channels=channels, init_block_channels=init_block_channels, meu_channels=meu_channels, use_se=use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def fpenet_cityscapes(classes=19, **kwargs): """ FPENet model for Cityscapes from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_fpenet(classes=classes, model_name="fpenet_cityscapes", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ fpenet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != fpenet_cityscapes or weight_count == 115125) batch = 4 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert (y.shape == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
13,217
29.883178
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/irevnet.py
""" i-RevNet for ImageNet-1K, implemented in Chainer. Original paper: 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. """ __all__ = ['IRevNet', 'irevnet301'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, pre_conv3x3_block, DualPathSequential, SimpleSequential class IRevDualPathSequential(DualPathSequential): """ An invertible sequential container for blocks with dual inputs/outputs. Blocks will be executed in the order they are added. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first blocks with single input/output. last_ordinals : int, default 0 Number of the final blocks with single input/output. dual_path_scheme : function Scheme of dual path response for a block. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal block. last_noninvertible : int, default 0 Number of the final blocks skipped during inverse. 
""" def __init__(self, return_two=True, first_ordinals=0, last_ordinals=0, dual_path_scheme=(lambda module, x1, x2: module(x1, x2)), dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2)), last_noninvertible=0): super(IRevDualPathSequential, self).__init__( return_two=return_two, first_ordinals=first_ordinals, last_ordinals=last_ordinals, dual_path_scheme=dual_path_scheme, dual_path_scheme_ordinal=dual_path_scheme_ordinal) self.last_noninvertible = last_noninvertible def inverse(self, x1, x2=None): length = len(self.layer_names) for i, block_name in enumerate(reversed(self.layer_names)): block = self[block_name] if i < self.last_noninvertible: pass elif (i < self.last_ordinals) or (i >= length - self.first_ordinals): x1, x2 = self.dual_path_scheme_ordinal(block.inverse, x1, x2) else: x1, x2 = self.dual_path_scheme(block.inverse, x1, x2) if self.return_two: return x1, x2 else: return x1 class IRevDownscale(Chain): """ i-RevNet specific downscale (so-called psi-block). Parameters: ---------- scale : int Scale (downscale) value. 
""" def __init__(self, scale): super(IRevDownscale, self).__init__() self.scale = scale def __call__(self, x): batch, x_channels, x_height, x_width = x.shape y_channels = x_channels * self.scale * self.scale assert (x_height % self.scale == 0) y_height = x_height // self.scale y = F.transpose(x, axes=(0, 2, 3, 1)) d2_split_seq = F.split_axis(y, indices_or_sections=(y.shape[2] // self.scale), axis=2) d2_split_seq = [t.reshape(batch, y_height, y_channels) for t in d2_split_seq] y = F.stack(d2_split_seq, axis=1) y = F.transpose(y, axes=(0, 3, 2, 1)) return y def inverse(self, y): scale_sqr = self.scale * self.scale batch, y_channels, y_height, y_width = y.shape assert (y_channels % scale_sqr == 0) x_channels = y_channels // scale_sqr x_height = y_height * self.scale x_width = y_width * self.scale x = F.transpose(y, axes=(0, 2, 3, 1)) x = x.reshape(batch, y_height, y_width, scale_sqr, x_channels) d3_split_seq = F.split_axis(x, indices_or_sections=(x.shape[3] // self.scale), axis=3) d3_split_seq = [t.reshape(batch, y_height, x_width, x_channels) for t in d3_split_seq] x = F.stack(d3_split_seq, axis=0) x = F.transpose(F.swapaxes(x, axis1=0, axis2=1), axes=(0, 2, 1, 3, 4)).reshape( batch, x_height, x_width, x_channels) x = F.transpose(x, axes=(0, 3, 1, 2)) return x class IRevInjectivePad(Chain): """ i-RevNet channel zero padding block. Parameters: ---------- padding : int Size of the padding. """ def __init__(self, padding): super(IRevInjectivePad, self).__init__() self.padding = padding def __call__(self, x): return F.pad(x, pad_width=((0, 0), (0, self.padding), (0, 0), (0, 0)), mode="constant", constant_values=0) def inverse(self, x): return x[:, :x.size(1) - self.padding, :, :] class IRevSplitBlock(Chain): """ iRevNet split block. 
""" def __init__(self): super(IRevSplitBlock, self).__init__() def __call__(self, x, _): x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) return x1, x2 def inverse(self, x1, x2): x = F.concat((x1, x2), axis=1) return x, None class IRevMergeBlock(Chain): """ iRevNet merge block. """ def __init__(self): super(IRevMergeBlock, self).__init__() def __call__(self, x1, x2): x = F.concat((x1, x2), axis=1) return x, x def inverse(self, x, _): x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) return x1, x2 class IRevBottleneck(Chain): """ iRevNet bottleneck block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the branch convolution layers. preactivate : bool Whether use pre-activation for the first convolution block. """ def __init__(self, in_channels, out_channels, stride, preactivate): super(IRevBottleneck, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): if preactivate: self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=stride) else: self.conv1 = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = pre_conv3x3_block( in_channels=mid_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IRevUnit(Chain): """ iRevNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the branch convolution layers. preactivate : bool Whether use pre-activation for the first convolution block. 
""" def __init__(self, in_channels, out_channels, stride, preactivate): super(IRevUnit, self).__init__() if not preactivate: in_channels = in_channels // 2 padding = 2 * (out_channels - in_channels) self.do_padding = (padding != 0) and (stride == 1) self.do_downscale = (stride != 1) with self.init_scope(): if self.do_padding: self.pad = IRevInjectivePad(padding) self.bottleneck = IRevBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate) if self.do_downscale: self.psi = IRevDownscale(stride) def __call__(self, x1, x2): if self.do_padding: x = F.concat((x1, x2), axis=1) x = self.pad(x) x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) fx2 = self.bottleneck(x2) if self.do_downscale: x1 = self.psi(x1) x2 = self.psi(x2) y1 = fx2 + x1 return x2, y1 def inverse(self, x2, y1): if self.do_downscale: x2 = self.psi.inverse(x2) fx2 = - self.bottleneck(x2) x1 = fx2 + y1 if self.do_downscale: x1 = self.psi.inverse(x1) if self.do_padding: x = F.concat((x1, x2), axis=1) x = self.pad.inverse(x) x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1) return x1, x2 class IRevPostActivation(Chain): """ iRevNet specific post-activation block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(IRevPostActivation, self).__init__() with self.init_scope(): self.bn = L.BatchNormalization( size=in_channels, eps=1e-5) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class IRevNet(Chain): """ i-RevNet model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): super(IRevNet, self).__init__() assert (in_channels > 0) self.in_size = in_size self.classes = classes with self.init_scope(): self.features = IRevDualPathSequential( first_ordinals=1, last_ordinals=2, last_noninvertible=2) with self.features.init_scope(): setattr(self.features, "init_block", IRevDownscale(scale=2)) in_channels = init_block_channels setattr(self.features, "init_split", IRevSplitBlock()) for i, channels_per_stage in enumerate(channels): stage = IRevDualPathSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 preactivate = not ((i == 0) and (j == 0)) setattr(stage, "unit{}".format(j + 1), IRevUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) in_channels = final_block_channels setattr(self.features, "final_merge", IRevMergeBlock()) setattr(self.features, "final_postactiv", IRevPostActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x, return_out_bij=False): x, out_bij = self.features(x) x = self.output(x) if return_out_bij: return x, out_bij else: return x def inverse(self, out_bij): x, _ = self.features.inverse(out_bij) return x def get_irevnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", 
"models"), **kwargs): """ Create i-RevNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 301: layers = [6, 16, 72, 6] else: raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 1 == blocks) channels_per_layers = [24, 96, 384, 1536] init_block_channels = 12 final_block_channels = 3072 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IRevNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def irevnet301(**kwargs): """ i-RevNet-301 model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_irevnet(blocks=301, model_name="irevnet301", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ irevnet301, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != irevnet301 or weight_count == 125120356) x = np.random.rand(2, 3, 224, 224).astype(np.float32) y = net(x) assert (y.shape == (2, 1000)) y, out_bij = net(x, return_out_bij=True) x_ = net.inverse(out_bij) assert (x_.shape == (2, 3, 224, 224)) assert (np.max(np.abs(x - x_.array)) < 1e-3) if __name__ == "__main__": _test()
15,501
30.962887
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/model_store.py
""" Model store which provides pretrained models. """ __all__ = ['get_model_file'] import os import zipfile import logging import hashlib _model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [ ('alexnet', '1610', 'd666015b6e4e82eccc5d6c47cd35282a8aede469', 'v0.0.481'), ('alexnetb', '1705', 'a22a3ab8a8fba9ab0cb729d3f4f32f4d4eaed56c', 'v0.0.485'), ('zfnet', '1675', '0205a9ab48a3d46407dd6263b1e5e003668ba3e0', 'v0.0.395'), ('zfnetb', '1456', '5808c73ebdc868de4da958a3b9b29f9646ecbfbb', 'v0.0.400'), ('vgg11', '1017', '7934dcf08eb44f0ebdb0a654733aba0c68e079cd', 'v0.0.381'), ('vgg13', '0952', 'f6af5a265c59e6ba07062bd917ef149077019338', 'v0.0.388'), ('vgg16', '0833', '5e08a9eccf74f0e89001f2bbccfc3aa2cd4b370f', 'v0.0.401'), ('vgg19', '0766', 'abf329094b917c8c73d3a584f3fd3a76eec4f8c3', 'v0.0.420'), ('bn_vgg11', '0937', '8fcdb341a39dd2b45c17d2db5304c61dc1b9227c', 'v0.0.339'), ('bn_vgg13', '0887', '1709fd1a05ff302100434574b8c10f9788c06f48', 'v0.0.353'), ('bn_vgg16', '0759', '8d6a2a82be26b8126cd8e5ead52c6ba0f2c3bdca', 'v0.0.359'), ('bn_vgg19', '0688', '5b6f413cb019374591a6f6597e4ced2ae81fb924', 'v0.0.360'), ('bn_vgg11b', '0978', '54b2345ee8b3251a1d9eba3f4b3c7e8b33d0b0ab', 'v0.0.407'), ('bn_vgg13b', '0916', 'e0110b44936aefc987f007654dca2ac150994d8e', 'v0.0.488'), ('bn_vgg16b', '0775', '037038446bcc72f2f42d042003e2e2a8cf4dc923', 'v0.0.489'), ('bn_vgg19b', '0733', '44d38dbe3b7134da21af424e743a284aadcb37bc', 'v0.0.490'), ('bninception', '0752', '44a9e12ccd43a521ea09b021214f2951725d0826', 'v0.0.405'), ('resnet10', '1224', 'ca2013186ce5b9c21d12cc5d2b570a79a3b3f575', 'v0.0.569'), ('resnet12', '1204', '651ffc1c04d51d6268e816bef89508f44b3a8ea4', 'v0.0.485'), ('resnet14', '1093', 'adafc1c18a43d882da3af341d4a0ff36db49b924', 'v0.0.491'), ('resnetbc14b', '1036', '8c665d1b9c0ade0a6707c9586fbb00a85aaf1f7a', 'v0.0.481'), ('resnet16', '0978', 'd2b6300f4c0ac1e3272ea4a58ece9e4d34b0a58d', 'v0.0.493'), ('resnet18_wd4', '1745', 
'79de61deb25b02203ba3f6da4aaf74a4287fc01d', 'v0.0.262'), ('resnet18_wd2', '1285', 'ae41e11d7dec4121f88552b806918bc160fcca05', 'v0.0.263'), ('resnet18_w3d4', '1067', '4defa49f9173d881ac7c3f9595db1d176d4be122', 'v0.0.266'), ('resnet18', '0868', '6e670b225570fe0d2862cd3bc65ac72480f3cf87', 'v0.0.478'), ('resnet26', '0824', '0ae9add49746f378bed04313828cc08879f5ea13', 'v0.0.489'), ('resnetbc26b', '0755', '74cf9fe93063636e46bcae0ab00ce02e34be9ae5', 'v0.0.313'), ('resnet34', '0746', '1856e049c0f6c18c1812b51e56f7d26efde52733', 'v0.0.291'), ('resnetbc38b', '0675', '9210464e98e553bceeb4812d1d72dbeae02eb72a', 'v0.0.328'), ('resnet50', '0607', 'f4a162287a4ac0ae502d5de8701fb77bfc958fed', 'v0.0.329'), ('resnet50b', '0615', '32bc835e3844b1da62a31198047304b5dc6f799d', 'v0.0.308'), ('resnet101', '0517', '88c52266a9452ffa111e1c11adb6bb2f1d071fe8', 'v0.0.499'), ('resnet101b', '0514', '077eb1e282d5ece78d68bb329874b80b8f052e23', 'v0.0.357'), ('resnet152', '0447', '99b827c464d10138928fa680a14b86648fd378b6', 'v0.0.518'), ('resnet152b', '0430', 'c047395271a19c7a7095c032787d9b9919b24d99', 'v0.0.517'), ('preresnet10', '1402', '94e8fc28c7129095273a9e17f6f8d7cc7f88aefc', 'v0.0.249'), ('preresnet12', '1318', 'fea1c8c51c1084ff1573abbaea7df6e3aee6a7ee', 'v0.0.257'), ('preresnet14', '1224', 'f9973f4f030c379c691ae52d9fe9795595e8e7c6', 'v0.0.260'), ('preresnetbc14b', '1153', '1d37e5334d2854dab5b85235fa24c08b6942bbca', 'v0.0.315'), ('preresnet16', '1080', 'ac7a346a9f200da344a334848aaf98c984446a29', 'v0.0.261'), ('preresnet18_wd4', '1778', '1cf8aa487aa79f903ccd4779d19e3cf01326a937', 'v0.0.272'), ('preresnet18_wd2', '1312', 'fa4ce56a87f1763cc037bdf6333d906fbbc86963', 'v0.0.273'), ('preresnet18_w3d4', '1069', '25ddcd56602fa811320836d280afa2bd015d7c7f', 'v0.0.274'), ('preresnet18', '0954', '21e4811aa9c868bc4afb21ca773493322ba09e82', 'v0.0.140'), ('preresnet26', '0838', '8cbc763838f642e8ca770769b57f979745e30be3', 'v0.0.316'), ('preresnetbc26b', '0786', '4c1e6a248620c09d884ab7a00a7a9a38b9f36f13', 
'v0.0.325'), ('preresnet34', '0755', 'b664c649a04a07364543a6125cf1e0286c64af6c', 'v0.0.300'), ('preresnetbc38b', '0636', '3105fbe866a6aff33a262efa904f5a2ab01881ff', 'v0.0.348'), ('preresnet50', '0624', 'a2bba5b6b4136029626fa717f661495f1e0f0de5', 'v0.0.330'), ('preresnet50b', '0634', '605b0eec9ab02677872ebe86acb200c1e7036300', 'v0.0.307'), ('preresnet101', '0534', 'b4e33e606ef4b4f323f7bcdc688ad2d265e1c6e5', 'v0.0.504'), ('preresnet101b', '0538', 'b502bf25880a9579cd22ab89341a6effcf5d48af', 'v0.0.351'), ('preresnet152', '0447', 'f62a6bc823c9439d372176c51fc648f4b24452ae', 'v0.0.510'), ('preresnet152b', '0438', '50121ca1db2920af1d6e7d7aaa1d9dbf574cd423', 'v0.0.523'), ('preresnet200b', '0446', 'f44d47e8eaf165cec579e2f76183b49f2dc828bf', 'v0.0.529'), ('preresnet269b', '0504', 'c93634683e040dbcf2bce9431fad3726a44532a8', 'v0.0.545'), ('resnext14_16x4d', '1226', '80d9a3310326debcf4f9669842fd03c56d88d504', 'v0.0.370'), ('resnext14_32x2d', '1249', '892f96a44bdd01e12eca47d478f3b8c0b3784555', 'v0.0.371'), ('resnext14_32x4d', '1115', 'fa0e7f7fd7d4a60a2876bb3c303dd1ee9fea264c', 'v0.0.327'), ('resnext26_32x2d', '0849', '58d86996a5e83efe1f49513b7ef5e7803f4830b6', 'v0.0.373'), ('resnext26_32x4d', '0719', '62ca50907121ceee5aada95eda7398dca69928cb', 'v0.0.332'), ('resnext50_32x4d', '0546', 'd1e20cc5a1c88a131033e2b1c4f4e06d28084c73', 'v0.0.498'), ('resnext101_32x4d', '0416', 'ec66f27ffcc27337c5b7d74f68cd3b0b466fb22c', 'v0.0.530'), ('resnext101_64x4d', '0438', '06bfc635568c5f87a998c512fc36c698dfcb8946', 'v0.0.544'), ('seresnet10', '1170', '2b3424cb76b192830ac243df3f5d60f2a0167e9c', 'v0.0.486'), ('seresnet12', '1176', '0095f8bd6c886fb6d84fd3a0ab2548e30e23668b', 'v0.0.544'), ('seresnet14', '1100', 'fb477b24f6e53027d67da964a41a1e412dc95604', 'v0.0.545'), ('seresnet16', '0971', 'c15d615add91439be1f4fb686aa6efaba250ab3c', 'v0.0.545'), ('seresnet18', '0923', 'b0931abe71a56c02e9c1012d34b99e469602eacc', 'v0.0.355'), ('seresnet26', '0806', '00032d5b07f7f580d6b39f433dec3133940d9418', 'v0.0.363'), 
('seresnetbc26b', '0684', '884c0e6bfaa08ac9250e90716e61e0e491f27648', 'v0.0.366'), ('seresnetbc38b', '0579', '7f103cd01544f11e9cd18e9cfefb56b00d215901', 'v0.0.374'), ('seresnet50', '0559', '6c5585d5e6e3fccd3ff742f081175fa0f4c2bf9f', 'v0.0.441'), ('seresnet50b', '0530', '1ac3bf504713faa817e7cf1c5e8e37fb4368a883', 'v0.0.387'), ('seresnet101', '0441', 'c70e51906cdd617dc6fa29956ad307b2d8e47698', 'v0.0.533'), ('seresnet101b', '0463', '97cc55c354860719b9c206d6808e2ebd7a019bf9', 'v0.0.460'), ('seresnet152', '0430', '0f5f30354879e404e363917de2cf8ddfb5f7a41b', 'v0.0.538'), ('sepreresnet10', '1220', '1bdbe9ae1acc4aeb018a2257a387c53216e78cf9', 'v0.0.544'), ('sepreresnet12', '1180', 'f41eb3683101fb9a2e775b41c2a45b4d4c48b673', 'v0.0.543'), ('sepreresnet16', '0955', '904e014fafe13bf9552796492d77c97d5c5321f0', 'v0.0.543'), ('sepreresnet18', '0883', '356877c7e7be4aa4e04b86bbd7537c2c6b68a4ec', 'v0.0.543'), ('sepreresnet26', '0805', 'e7e6cc562a715c904dfdea61e116b01ca9dbfd8a', 'v0.0.543'), ('sepreresnetbc26b', '0638', 'e8393574e457106c4963f7979d91b67dc7554239', 'v0.0.399'), ('sepreresnetbc38b', '0566', '4b9ce0969cc05e5a710fed7073132ad025789091', 'v0.0.409'), ('sepreresnet50b', '0531', 'fde03b262da6696c27d1de9188b0d4f35854b92b', 'v0.0.461'), ('seresnext50_32x4d', '0435', '52a11b61d67ede6016b5169d10294bce351e3862', 'v0.0.505'), ('seresnext101_32x4d', '0445', 'fd8f34d6995be18893c99d5568fd016d1a491e25', 'v0.0.529'), ('seresnext101_64x4d', '0407', '5c3b4e4b4a4bd1922519c87e9f25e63ddcac1143', 'v0.0.561'), ('senet16', '0807', 'f45aa3fffb8ea5148c53d031e50a3f93ab00ede0', 'v0.0.341'), ('senet28', '0591', '7e7bf250ab1bb4842f6dd32ceb93967a7c02239b', 'v0.0.356'), ('senet154', '0442', '9f7f0ae340671343ae7e341b1183cacc2f20124c', 'v0.0.522'), ('resnestabc14', '0633', 'a76f3b83d2ecab7ee278000f6d205ddae648e0bf', 'v0.0.493'), ('resnesta18', '0694', '4ecaf0b7ce07224b4eb1972562ec8aa39e481796', 'v0.0.489'), ('resnestabc26', '0469', '14cb6ca540ef6cb8feb35199b015e41d69fb5df6', 'v0.0.495'), ('resnesta50', 
'0438', '84192ed0ab36270dfc4bed570cc5e1171b4bf4b3', 'v0.0.531'), ('resnesta101', '0400', 'bd2efb4282ecce70dfb7985a2fbda9a0ee11a955', 'v0.0.465'), ('resnesta152', '0452', '70e32d11f9c62120fb1be6d2a616f90fdf4647d4', 'v0.0.540'), ('resnesta200', '0339', 'b521427b8e168879455118c21ed4d99f2d37a682', 'v0.0.465'), ('resnesta269', '0336', '933dbe64fb27a174752b77980bad3c545582c5e8', 'v0.0.465'), ('airnet50_1x64d_r2', '0528', '650960afc0ace5180fa6a2542a183f8b54ee9bfe', 'v0.0.522'), ('airnet50_1x64d_r16', '0547', '6d3d6db4c9ae6f449c3fa9d7d2787dad7e03bed1', 'v0.0.519'), ('airnext50_32x4d_r2', '0503', '2bb8e4fc574d18a8b0896c1505de7bf4551d682d', 'v0.0.521'), ('bam_resnet50', '0535', 'e2069bb5c18d2dca451688615bc0197a3387e441', 'v0.0.499'), ('cbam_resnet50', '0488', '8a05bbda852fa152cd057b4e716bdebee024eb6d', 'v0.0.537'), ('scnet50', '0535', 'f0ef9a4c4d2b3202c5f73fa3f0a129f3a5baba63', 'v0.0.493'), ('scnet101', '0464', '7b1574415a93dc6d3ad66dfd494d99cceec925a6', 'v0.0.507'), ('scneta50', '0466', 'e90cf3c5fb3dacdb2945a8cfa7fdb096fc5bafd2', 'v0.0.472'), ('regnetx002', '1038', '04ae28288f3d6ae14a0397122ab47d1d9f6bb131', 'v0.0.475'), ('regnetx004', '0855', 'ecd227780275b53cd351fe902bfa43314decc6f1', 'v0.0.479'), ('regnetx006', '0760', 'fadb78d4fd6db17e87525a4565140e60d2cfdf9d', 'v0.0.482'), ('regnetx008', '0723', '5fff64913b28a54b188443e5ca7c2f3e4ccb12be', 'v0.0.482'), ('regnetx016', '0613', '5092bfd9e5c842c3ed5bfa664af4fc93bdcbbb19', 'v0.0.486'), ('regnetx032', '0569', 'c3625268f53c5c78f6a0cd78444890abc0a3286c', 'v0.0.492'), ('regnetx040', '0471', '362a2ce3dd02702db90e37deb8cc858e26fa9674', 'v0.0.495'), ('regnetx064', '0455', '3bdbfa0d09d0d06d66d43fbd81d8191a6bc949f8', 'v0.0.535'), ('regnetx080', '0467', '3618e40bed12247e0f27027f09adecceafc514eb', 'v0.0.515'), ('regnetx120', '0519', '686990070b7bd855b5792424056acea21f16c5f1', 'v0.0.542'), ('regnetx160', '0458', '99da1e86058c2dd4c78da404eab02227955023f2', 'v0.0.532'), ('regnetx320', '0394', '22a74cbae34c199dd199df216f2b87fc1ab4884e', 
'v0.0.548'), ('regnety002', '0955', '5ba3e62c853d6216aadd189c49cfa2dc0d9fd871', 'v0.0.476'), ('regnety004', '0752', 'e30b7c274003dad0f2fa5c8e16e2e34992acc3ea', 'v0.0.481'), ('regnety006', '0700', '0917e50c0cdded628c6c7a4318bf42d55e4f9960', 'v0.0.483'), ('regnety008', '0645', 'aa4c6104c38d80ead55cbab28445d5a149ddbc52', 'v0.0.483'), ('regnety016', '0571', '962bc21c498ac8022a863853a6d3b030ffa1b997', 'v0.0.486'), ('regnety032', '0411', '7097f659b5ac93d7a53d8e94bf87f05c6f16007c', 'v0.0.473'), ('regnety040', '0466', 'c84f52dfeeb8001e6456fc033090bd27768f5834', 'v0.0.494'), ('regnety064', '0446', '22d3ab05b95da9402dd8aaf08970c7fb13120b76', 'v0.0.513'), ('regnety080', '0437', 'f324b0464dc045e411606b6da836d13d5e456ce8', 'v0.0.516'), ('regnety120', '0430', 'fa03425ab8fc82a9bbbb59a4eb5173bb9f8ea7aa', 'v0.0.526'), ('regnety160', '0429', '0a034eb9e3171320f3bd41199b466c96534f14aa', 'v0.0.527'), ('regnety320', '0372', '7bc537394ea4baabb5b839b5bf63d1de24ef5007', 'v0.0.550'), ('pyramidnet101_a360', '0520', 'e37960c468df05477caaf2fec90d9a81ff675cf3', 'v0.0.507'), ('diracnet18v2', '1113', 'b85b43d13697dfbddbea6e46dea4766359fff7e5', 'v0.0.111'), ('diracnet34v2', '0948', '0245163a5c947bd6e07a743f17e6ca92c79c84da', 'v0.0.111'), ('densenet121', '0683', '4caa2458d39ef6dc467ef3d1a2921ce214b9ddda', 'v0.0.314'), ('densenet161', '0590', 'a514f930224961341c65890cb1039b02076426b8', 'v0.0.432'), ('densenet169', '0609', '99c9bddf1ae3472efad2b4775fd91b540078e1d3', 'v0.0.406'), ('densenet201', '0590', 'f50cfbb1d3cf084d107cff5d165dd5c7fc72b6b9', 'v0.0.426'), ('condensenet74_c4_g4', '0861', 'ef6077ec5348504346b3bcbaacbc308f825a9f87', 'v0.0.36'), ('condensenet74_c8_g8', '1043', '277fbfb898e0c8c7de8475184bcf5e651da10acc', 'v0.0.36'), ('peleenet', '0984', '32d512c0d0f89b8cbc413f34d6d6609712f6668c', 'v0.0.496'), ('wrn50_2', '0607', 'c597636592320ac714874bb4dbe90e4c89c3b6c9', 'v0.0.520'), ('drnc26', '0710', '16a6251b3f82de588f43e203a6845de006a10882', 'v0.0.508'), ('drnc42', '0614', 
'6b2099e0224e0f451a67b5d8266bd9a6535793e2', 'v0.0.556'), ('drnc58', '0517', 'a333394a13eebff2732db2277db6a0f37eba76c6', 'v0.0.559'), ('drnd22', '0747', 'e4c5cf7351f691f0afe4c7b4325fd4e3a4b47681', 'v0.0.498'), ('drnd38', '0624', '1a1a7b99703d3d8bade3ebb0f22f0ffbad33ad60', 'v0.0.552'), ('drnd54', '0500', 'b679707c999e1a6f24a63e3d96f667a941cef141', 'v0.0.554'), ('drnd105', '0489', '49cba09d60243592c294e526b07ce9b031e736f4', 'v0.0.564'), ('dpn68', '0656', 'bf9b72e9749da4c6ee5a544639f78ac7fa85f7ce', 'v0.0.310'), ('dpn98', '0426', '2ab67669d6b82719cd23128ccdf1ef71511bea94', 'v0.0.540'), ('dpn131', '0479', 'eb5a4bc267cee898cb59c4a42639a36aa47e3220', 'v0.0.534'), ('darknet_tiny', '1746', 'b04fa46318a78e977aa5a117786968d98d325871', 'v0.0.69'), ('darknet_ref', '1671', 'b2d5721f3a5f6f05cc785d57ff7a63fe82f6325e', 'v0.0.64'), ('darknet53', '0552', '3e6f40761d22e830fe6040def741590bfb77a653', 'v0.0.501'), ('irevnet301', '0738', 'cc2ac933dd09db744a2931a56f16b7edd663f9a1', 'v0.0.564'), ('bagnet9', '2544', '346ee14310c654f3484b5ef380950557fc5a2078', 'v0.0.553'), ('bagnet17', '1523', 'ccc69ea4558cd023e9e0a8e13269ef63700b63e0', 'v0.0.558'), ('bagnet33', '1044', '17d82bc6bff4e87629259271df27e0c2fe818f01', 'v0.0.561'), ('dla34', '0706', '576dd492cc7047152c1251c0b56595da6e09e0bb', 'v0.0.486'), ('dla46c', '1292', '98e3efd5e9cd50d3b403bc36b71614aad4bf69ff', 'v0.0.282'), ('dla46xc', '1228', 'c2dc61bc0ac57dc4f5b4041d3261ac3d7df521b2', 'v0.0.293'), ('dla60', '0554', '740881ca03874579a87915a60474e2844dd9aa5b', 'v0.0.494'), ('dla60x', '0554', '4d7575621aaca43cbb21184ad6a4757e45383e3c', 'v0.0.493'), ('dla60xc', '1076', '4c418399df58871201cc0487db4e72411ff53c44', 'v0.0.289'), ('dla102', '0518', 'ebbfedd74580223c69e97c7231c48e430b1f9bc6', 'v0.0.505'), ('dla102x', '0474', '39bad3b5b8c1fc55069f58c8a52328349097c8e7', 'v0.0.528'), ('dla102x2', '0425', '4ffec22510a29d1a22d0eb9666be1649b120c789', 'v0.0.542'), ('dla169', '0461', 'f47952e20e67ac6b526ae9946f673d3a5c57e4da', 'v0.0.539'), ('fishnet150', 
'0465', '724e74fc0471c81dd75a97c1928f46d06d10815b', 'v0.0.502'), ('espnetv2_wd2', '1969', 'a245c12af84f0444b12e9f8b37df1e8e43b7bb9c', 'v0.0.567'), ('espnetv2_w1', '1380', '551b5babcfeb978c9517558b3128fd70ff73d41a', 'v0.0.569'), ('espnetv2_w5d4', '1224', '68e58dc622831639abaa3eca0788050309fa71da', 'v0.0.564'), ('espnetv2_w3d2', '1083', '8e56a2e5ad5f3aee5e329aa4d98d77f8e77d738c', 'v0.0.566'), ('espnetv2_w2', '0944', '5ec4ed9507d1654035da89337fb7a376802e6abe', 'v0.0.566'), ('dicenet_wd5', '3055', '708911b883ae8d99c0d59dd18f9a23a9673aeb16', 'v0.0.563'), ('dicenet_wd2', '2309', 'b383f3a67410e388ee15c597a4d16dacaccabb54', 'v0.0.561'), ('dicenet_w3d4', '1623', '16a253d0e1dbe2924ff6a480b4769defb77fe0b4', 'v0.0.567'), ('dicenet_w1', '1413', 'e926dbb2c070eb0b0c92fcb05d361d427c6a32ad', 'v0.0.513'), ('dicenet_w5d4', '1255', 'f4914cf7100caf8d768c15518caba45185a03f64', 'v0.0.515'), ('dicenet_w3d2', '1144', 'a6b89c5e819e032f30a729170871a7f2c13b11d6', 'v0.0.522'), ('dicenet_w7d8', '1080', 'f2a3e6aeb1041a7d97afb3b21e1bd659cf1527c9', 'v0.0.527'), ('dicenet_w2', '0918', 'a5ba3293b088ad8183bca596a391a017c2357680', 'v0.0.569'), ('hrnet_w18_small_v1', '0873', '96476e4b15451b62abf443101a5152e34be552e2', 'v0.0.492'), ('hrnet_w18_small_v2', '0603', '87be573162d6f21af4c816a7b389a93ef9f7af85', 'v0.0.499'), ('hrnetv2_w18', '0504', '27b854bf5ec915e735ef9deb7b28049db3f32e14', 'v0.0.508'), ('hrnetv2_w30', '0506', '04453d82f1903ef7bae24928089eee31e96b7501', 'v0.0.525'), ('hrnetv2_w32', '0493', '9f43468d683eb07dc663025a923a100872e702e0', 'v0.0.528'), ('hrnetv2_w40', '0479', '738507ee2be69b25ed561c268c9dd2e494995a1f', 'v0.0.534'), ('hrnetv2_w44', '0490', '6e10e2b59d14d390c539c07a399c52513ca3f348', 'v0.0.541'), ('hrnetv2_w48', '0486', 'e8df81b5f3c59b8a09b4ca898c9f557ea93abfb6', 'v0.0.541'), ('hrnetv2_w64', '0479', '702c5688006ccc606cfc7deabab67a988aa7bf7e', 'v0.0.543'), ('vovnet27s', '0978', '6983e4ee743e345a1ff587522d4daa2a344d7c12', 'v0.0.551'), ('vovnet39', '0553', 
'6a8b67836ffb3e08fbad0b56b4ba0e5437dd9056', 'v0.0.493'), ('vovnet57', '0512', 'beb31bd5037c55a1652cf84ab4bff37775bdd515', 'v0.0.505'), ('selecsls42b', '0601', 'd89a50426a90267fcb5f23fb29f323de8eb53a20', 'v0.0.493'), ('selecsls60', '0514', '637c2506b3681efe06a39b6dccff6e274f0bb6e8', 'v0.0.496'), ('selecsls60b', '0538', 'f9f8d657c6abc03ef15c08b17149d799ba8b02ce', 'v0.0.495'), ('hardnet39ds', '0870', 'fcf92ed6c41c073e506f0e7e15ca20ce13795920', 'v0.0.485'), ('hardnet68ds', '0743', 'a6b77ed008c9a937f6a915f1b8440d4310ee982e', 'v0.0.487'), ('hardnet68', '0704', '71176051a7c9e95b6bdceb693664ecdaf9241b95', 'v0.0.557'), ('hardnet85', '0569', '5f3cb7bebb19060b3c02bbe899d98daae8c680a0', 'v0.0.495'), ('squeezenet_v1_0', '1738', '4c55a6a5c7ae14b88a7989eea5a7dc60960120ef', 'v0.0.128'), ('squeezenet_v1_1', '1740', 'b236c2047fe1d9b283ccfaabb763143a214ecc33', 'v0.0.88'), ('squeezeresnet_v1_0', '1766', '6dc69dc26e83beaa98fa77ee64d208294f7850f9', 'v0.0.178'), ('squeezeresnet_v1_1', '1787', 'f40e60512a8b66f314f4d7ffab9b18dd31715b3a', 'v0.0.70'), ('sqnxt23_w1', '1903', 'ef3d725b418277e98ed5e590e615cc13df2f001e', 'v0.0.171'), ('sqnxt23v5_w1', '1786', '8b24c6e36f00be6d1b970f3c10e2b956fe281357', 'v0.0.172'), ('sqnxt23_w3d2', '1344', 'a5c3b21eb05532cba4b35f530fea2bdaac3d6bf5', 'v0.0.210'), ('sqnxt23v5_w3d2', '1292', 'c997e27957a32f89538f23d86207a044d2dc0c93', 'v0.0.212'), ('sqnxt23_w2', '1082', 'cf7aebefd6abb1fb3fea72dc10e0ad3dd145be8b', 'v0.0.240'), ('sqnxt23v5_w2', '1043', 'e9e849cdfeba0f8b3cdfd34bc214cc6526016dc4', 'v0.0.216'), ('shufflenet_g1_wd4', '3681', '15d3e7871b85cee9283663bbbc78dfe5e1a1a1db', 'v0.0.134'), ('shufflenet_g3_wd4', '3616', '064f7f7f1dd327f43e16adf5e4864a31e16d9ad9', 'v0.0.135'), ('shufflenet_g1_wd2', '2235', '5d83cc2822fbd0669af75d93c7940aa09e78d317', 'v0.0.174'), ('shufflenet_g3_wd2', '2061', '557e4397da6cebf2dd7b70e8039100f07414437a', 'v0.0.167'), ('shufflenet_g1_w3d4', '1677', 'b5515ea9c945c92fc4272ba7daf0002314cc61de', 'v0.0.218'), ('shufflenet_g3_w3d4', '1613', 
'55129cb578d0d53bb962e703da0746930d092c2a', 'v0.0.219'), ('shufflenet_g1_w1', '1348', '37cc6c5f70ad982ff3fc9c92a0ae6405bb46e2c7', 'v0.0.223'), ('shufflenet_g2_w1', '1333', 'e473c62fe289cc2563cb17cfa4c8562f25fd6e49', 'v0.0.241'), ('shufflenet_g3_w1', '1326', '95df048749f08aa69e9aed33a8bd7182b4caf2df', 'v0.0.244'), ('shufflenet_g4_w1', '1308', '8ed92f35a9d69874e3c9d040785f6c71c54d976c', 'v0.0.245'), ('shufflenet_g8_w1', '1321', '2fea8945a2115c718cdb09a22a95f4e2808e098b', 'v0.0.250'), ('shufflenetv2_wd2', '2073', 'c5e5a23c300c800d55e2f45e1dcb2e12907c0eae', 'v0.0.90'), ('shufflenetv2_w1', '1298', '3830a2da0701f2b31385aceeb828101008446812', 'v0.0.133'), ('shufflenetv2_w3d2', '1014', '5f75edb160035ea6e8f2896e4c233fa2a1494af1', 'v0.0.288'), ('shufflenetv2_w2', '0899', 'a44b1d5d86f6041e8d34fb3b13563d144dc6b4c0', 'v0.0.301'), ('shufflenetv2b_wd2', '1787', '08a12021fa41000f5f6206446d34daa2eebb8d00', 'v0.0.157'), ('shufflenetv2b_w1', '1100', '21562fb22a353559c6c732e54e807766bb576dee', 'v0.0.161'), ('shufflenetv2b_w3d2', '0878', '7a5c7ed4aa440788875680b2a12531716ee02f98', 'v0.0.203'), ('shufflenetv2b_w2', '0810', '636e281ce91bf852fd20adb07f0037be8dd3d6b6', 'v0.0.242'), ('menet108_8x1_g3', '2042', '9e3ff283ac81b4f4e6d4a5b11d8d54b63f4aa2f0', 'v0.0.89'), ('menet128_8x1_g4', '1919', 'f6fd56fae09d0c528c902d1381f7cf401590d130', 'v0.0.103'), ('menet160_8x1_g8', '2042', '250fd7654d54c79477ef7cbf402e15d69ea3ea6a', 'v0.0.154'), ('menet228_12x1_g3', '1301', '39c25ca345751cac91395a602565796393fea60d', 'v0.0.131'), ('menet256_12x1_g4', '1218', '57160b09127535a3733f22af10d50fb16d5d2643', 'v0.0.152'), ('menet348_12x1_g3', '0936', 'ee7e056d0f38a68a6d6c85fe8162bee944a73121', 'v0.0.173'), ('menet352_12x1_g8', '1172', 'c256ae25591e33ce6b9e12177305eacb3dd9620c', 'v0.0.198'), ('menet456_24x1_g3', '0779', '5af355f6457347168d5b95323b6d7480360398d8', 'v0.0.237'), ('mobilenet_wd4', '2216', '09c50ab8d72049a4aa9cae4bd1502859522b9a70', 'v0.0.62'), ('mobilenet_wd2', '1337', 
'48d12ee398fa6dc23596f669fb202f08108a6ccc', 'v0.0.156'), ('mobilenet_w3d4', '1053', 'd7ec3192f88b7017d477fdb704ad6ad77a4c5cc1', 'v0.0.130'), ('mobilenet_w1', '0866', 'b888f817a2978cdeb00a09fd5e71c3f2a52ddd8c', 'v0.0.155'), ('mobilenetb_wd4', '2164', '65e4eeb5d97217ca029056b68410dddce46367b4', 'v0.0.481'), ('mobilenetb_wd2', '1269', 'a649a585b080f5605e66807d49f5092a2d306b99', 'v0.0.480'), ('mobilenetb_w3d4', '1019', 'a54016b221391951f530fde1d66333026142ee97', 'v0.0.481'), ('mobilenetb_w1', '0788', 'e95ffdb9154278b60b27d58900d4985d6498c89b', 'v0.0.489'), ('fdmobilenet_wd4', '3063', '55407f3a3e3370fa2951f651f14faac3bf9a9f28', 'v0.0.177'), ('fdmobilenet_wd2', '1976', '6299d44272390440be808e58059219b0d57907e4', 'v0.0.83'), ('fdmobilenet_w3d4', '1599', 'cdfc2e043017be0166cf06cb9f49e0f516aa5d15', 'v0.0.159'), ('fdmobilenet_w1', '1316', '0ed6f00cbb5095eff002882e31c006edb1c5235e', 'v0.0.162'), ('mobilenetv2_wd4', '2411', '9fc398d348226c410659464d12b0fe6b7d4506e7', 'v0.0.137'), ('mobilenetv2_wd2', '1444', 'ca0906e176f15855aa8c8d771c841c3f9cd3d454', 'v0.0.170'), ('mobilenetv2_w3d4', '1047', 'a25fd26c426b5af8c5761b9d634b508622f019cf', 'v0.0.230'), ('mobilenetv2_w1', '0866', 'efc3331e08dfc578526bbf5e161c15e50b146c63', 'v0.0.213'), ('mobilenetv2b_wd4', '2342', 'bf23c31450e60bba8f19745e23d6d4b579387cc8', 'v0.0.483'), ('mobilenetv2b_wd2', '1376', 'f68cc37dc7ac2517fc9d8a0b25d8e454012549bb', 'v0.0.486'), ('mobilenetv2b_w3d4', '1067', 'ba0caa95046fd230f974022313171c241bc841af', 'v0.0.483'), ('mobilenetv2b_w1', '0890', 'dbc98d15c71586688436c4359a5a536c75e1559b', 'v0.0.483'), ('mobilenetv3_large_w1', '0733', '20f2980c2bb0140f587a959f50ef66b9b570698d', 'v0.0.491'), ('igcv3_wd4', '2828', '25942192926a7dcdd0c57238336a8a0ef840e079', 'v0.0.142'), ('igcv3_wd2', '1704', '86246558ade35232344a4c448288ae3927143f9c', 'v0.0.132'), ('igcv3_w3d4', '1099', 'b0dbc54a5c40c7bd55ebd3cab05e39263064f4ec', 'v0.0.207'), ('igcv3_w1', '0898', '5fd85acd8a4ed75845e2ef770c25460c5f7eff95', 'v0.0.243'), 
('mnasnet_b1', '0725', '2733981b74b01d4e4984a7e5060e84e5a8cfeab4', 'v0.0.493'), ('mnasnet_a1', '0705', '9ac62ab0edbb8f28d13670b0d34893353ae2bd7a', 'v0.0.486'), ('darts', '0758', '8085336b75ace69817910c822149acb67083cf95', 'v0.0.485'), ('proxylessnas_cpu', '0752', '22bd211b1fbf219f1cb28ed7a407e3949a2037ea', 'v0.0.324'), ('proxylessnas_gpu', '0723', 'b81256a146f7e0c08a5d5004332bb409576799f3', 'v0.0.333'), ('proxylessnas_mobile', '0785', '561f3416638764215dcd975b2f7e27fc34974929', 'v0.0.326'), ('proxylessnas_mobile14', '0651', '7467ce2d73d14facfc593c395fe73a6f2d7dc456', 'v0.0.331'), ('fbnet_cb', '0764', '9a8153a55f4338aa22671f090cbb20e65b19c7df', 'v0.0.486'), ('xception', '0518', 'a311fd377a845eacdec8146503a61e672d82a78c', 'v0.0.544'), ('inceptionv3', '0535', '1662fcdcc33232e91193a9e9076f9fa66266c4e8', 'v0.0.552'), ('inceptionv4', '0487', '75970908c643ff9a4cd681ab82d21fb56e3604cd', 'v0.0.543'), ('inceptionresnetv1', '0481', 'a3ddee2c2e5347b7103889cd2590204048d70d4f', 'v0.0.552'), ('inceptionresnetv2', '0472', '178ff37acc2c10d9c770bd0ae14d4aec77aff683', 'v0.0.547'), ('polynet', '0450', '6dc7028b0edc48c452f83dd38448b1242c554a5e', 'v0.0.96'), ('nasnet_4a1056', '0790', '92b4789ba1ad62d39ff7aac4ddbb21af45e84d02', 'v0.0.495'), ('nasnet_6a4032', '0422', 'd49d46631abda0ec7ac4a0076e6f8d05bf99b7d1', 'v0.0.101'), ('pnasnet5large', '0426', '3c2755dce80a29dea19b398dce514a640da2aaa3', 'v0.0.114'), ('spnasnet', '0779', '4fa174dbff90b886eee9b5f9e3f2b164ccda3707', 'v0.0.490'), ('efficientnet_b0', '0725', '8d6f17447e9fa2da26963b72cf8fd359aebba504', 'v0.0.364'), ('efficientnet_b1', '0633', '4ac377d926a55be53052c42f21678c26862a81eb', 'v0.0.376'), ('efficientnet_b0b', '0669', '366e9c540a59d954fdfd13b46f47b91231aa8700', 'v0.0.403'), ('efficientnet_b1b', '0567', '2826a68613cbecc782819f24ddd5b031bfed1586', 'v0.0.403'), ('efficientnet_b2b', '0514', '93c91747fda8ea4f20d6eacb678ba13bacb455bc', 'v0.0.403'), ('efficientnet_b3b', '0436', '82eb9d9104377ec90cfecc8e8f04a9876d3c16f9', 'v0.0.403'), 
('efficientnet_b4b', '0392', '81138451fda7683c964ea52a9f2a7ea48622ef33', 'v0.0.403'), ('efficientnet_b5b', '0339', 'fb684f5dc219d9463acb5aa42b48bd920f887cd1', 'v0.0.403'), ('efficientnet_b6b', '0324', 'acaad4db1bb064f088d53b620ee682ecc328c80d', 'v0.0.403'), ('efficientnet_b7b', '0323', '031b7bd5e4c361f734eb40bba1e10a11df0a8374', 'v0.0.403'), ('efficientnet_b0c', '0644', 'e95e873de2fa5ef2fedaff2264fdd3f276a24818', 'v0.0.433'), ('efficientnet_b1c', '0557', '07796241b5ef171966c1be23d911c5604936f385', 'v0.0.433'), ('efficientnet_b2c', '0496', '5a0d33334fedd1b327cec38e78b2c6d6f410051d', 'v0.0.433'), ('efficientnet_b3c', '0440', 'ec082c3117c91028c25e47c2df1eacff1af4673d', 'v0.0.433'), ('efficientnet_b4c', '0368', 'c025d233c76831db75f7032e7b6e2450f9a4813d', 'v0.0.433'), ('efficientnet_b5c', '0311', 'e01810a9209563e4d00aaab725eccc6220662cf8', 'v0.0.433'), ('efficientnet_b6c', '0298', '72ac53f6de551166cb900b38a31582be7b467f3f', 'v0.0.433'), ('efficientnet_b7c', '0291', 'c0711f2102c5211cf4985e87bd9eec1cac2eeb62', 'v0.0.433'), ('efficientnet_b8c', '0276', 'd1c7aa153428ed631730a4510333eec7329667ed', 'v0.0.433'), ('efficientnet_edge_small_b', '0629', '4aac359125638568f1461072a97ed96cb5a8a34c', 'v0.0.434'), ('efficientnet_edge_medium_b', '0552', 'fdf98bd58abbd135f59b8630e2beaffccdbc4832', 'v0.0.434'), ('efficientnet_edge_large_b', '0489', '45f0595804415252d4496153eefd45f2ebc38fd5', 'v0.0.434'), ('mixnet_s', '0705', '4822e76db658a73a098a50b01034d3c9b9f5afdd', 'v0.0.493'), ('mixnet_m', '0634', '2638a38807457af19e82f7761d9be532b5737e36', 'v0.0.493'), ('mixnet_l', '0559', 'ff6929efd523b16c1fdd265624046c74e61fcbff', 'v0.0.500'), ('resneta10', '1161', 'c28d6ca63f9d1f6b5b68c721245ea74772b4afb7', 'v0.0.484'), ('resnetabc14b', '0957', '84e05fea22df04a711a8d60ee69bdfd676b10648', 'v0.0.477'), ('resneta18', '0805', 'f4088383363f90f2da8ef4e422d2d44dada566cf', 'v0.0.486'), ('resneta50b', '0537', '204eb60edce7da09c28bb469a7d91422b1d08c71', 'v0.0.492'), ('resneta101b', '0445', 
'5b214e8c82cfbbcd05e10927a2429353a343e82b', 'v0.0.532'), ('resneta152b', '0423', '515d6b62a931f56cb6973ab45b08f8a8daaf59ca', 'v0.0.524'), ('resnetd50b', '0550', '7ba88f0436b3fa598520424bb463ac985ffb0caf', 'v0.0.296'), ('resnetd101b', '0460', 'b90f971e4514345fb885de95165ddcc4e6610234', 'v0.0.296'), ('resnetd152b', '0470', '41442334cde93c9744d2a86288d11614c848503a', 'v0.0.296'), ('nin_cifar10', '0743', '045abfde63c6b73fbb1b6c6b062c9da5e2485750', 'v0.0.175'), ('nin_cifar100', '2839', '891047637c63f274d4138a430fcaf5f92f054ad4', 'v0.0.183'), ('nin_svhn', '0376', '2fbe48d0dd165c97acb93cf0edcf4b847651e3a0', 'v0.0.270'), ('resnet20_cifar10', '0597', '15145d2e00c85b5c295b6999068ce4b494febfb0', 'v0.0.163'), ('resnet20_cifar100', '2964', '6a85f07e9bda4721ee68f9b7350250b866247324', 'v0.0.180'), ('resnet20_svhn', '0343', 'b6c1dc9982e1ee04f089ca02d5a3dbe549b18c02', 'v0.0.265'), ('resnet56_cifar10', '0452', 'eb7923aa7d53e4e9951483b05c9629010fbd75a4', 'v0.0.163'), ('resnet56_cifar100', '2488', '2d641cdef73a9cdc440d7ebfb665167907a6b3bd', 'v0.0.181'), ('resnet56_svhn', '0275', 'cf18a0720e4e73e5d36832e24a36b78351f9c266', 'v0.0.265'), ('resnet110_cifar10', '0369', '27d76fce060ce5737314f491211734bd10c60308', 'v0.0.163'), ('resnet110_cifar100', '2280', 'd2ec4ff1c85095343031a0b11a671c4799ae1187', 'v0.0.190'), ('resnet110_svhn', '0245', 'f274056a4f3b187618ab826aa6e3ade028a3a4da', 'v0.0.265'), ('resnet164bn_cifar10', '0368', 'd86593667f30bfef0c0ad237f2da32601b048312', 'v0.0.179'), ('resnet164bn_cifar100', '2044', '190ab6b485404e43c41a85542e57adb051744aa0', 'v0.0.182'), ('resnet164bn_svhn', '0242', 'b4c1c66ccc47f0802058fcd469844811f214bbca', 'v0.0.267'), ('resnet272bn_cifar10', '0333', 'b7c6902a5e742b2c46c9454be5962f9a5e5a0fa5', 'v0.0.368'), ('resnet272bn_cifar100', '2007', 'fe6b27f8b18785d568719dfbaea79ae05eb0aefe', 'v0.0.368'), ('resnet272bn_svhn', '0243', '693d5c393d2823146a1bdde0f8b11bb21ccd8c12', 'v0.0.368'), ('resnet542bn_cifar10', '0343', 'b6598e7a0e5bd800b4425424b43274a96677e77b', 
'v0.0.369'), ('resnet542bn_cifar100', '1932', '4f95b380a755ae548187bfa0da038565c50e1e26', 'v0.0.369'), ('resnet542bn_svhn', '0234', '7421964d2246a7b5ba7f9baf294cc3bd06329ad8', 'v0.0.369'), ('resnet1001_cifar10', '0328', '0e27556cdc97b7d0612d4518546a9b0479e030c3', 'v0.0.201'), ('resnet1001_cifar100', '1979', '6416c8d2f86debf42f1a3798e4b53fa8d94b0347', 'v0.0.254'), ('resnet1001_svhn', '0241', 'c8b23d4c50359cac2fbd837ed754cc4ea7b3b060', 'v0.0.408'), ('resnet1202_cifar10', '0353', 'd82bb4359d16e68989547f8b1153c8f23264e46c', 'v0.0.214'), ('resnet1202_cifar100', '2156', '711136021e134b4180cc49c7bb1dda2bd0d4ab49', 'v0.0.410'), ('preresnet20_cifar10', '0651', '5cf94722c7969e136e2174959fee4d7b95528f54', 'v0.0.164'), ('preresnet20_cifar100', '3022', 'e3fd9391a621da1afd77f1c09ae0c9bdda4e17aa', 'v0.0.187'), ('preresnet20_svhn', '0322', '8e56898f75a9ba2c016b1e14e880305e55a96ea7', 'v0.0.269'), ('preresnet56_cifar10', '0449', '73ea193a6f184d034a4b5b911fe6d23473eb0220', 'v0.0.164'), ('preresnet56_cifar100', '2505', 'f879fb4e9c9bc328b97ca8999575ea29343bbd79', 'v0.0.188'), ('preresnet56_svhn', '0280', 'f512407305efa862c899a56cfc86003ee9ca0e9f', 'v0.0.269'), ('preresnet110_cifar10', '0386', '544ed0f0e0b3c0da72395924e2ea381dbf381e52', 'v0.0.164'), ('preresnet110_cifar100', '2267', '4e010af04fefb74f6535a1de150f695460ec0550', 'v0.0.191'), ('preresnet110_svhn', '0279', '8dcd3ae54540a62f6a9b87332f0aa2abfc587600', 'v0.0.269'), ('preresnet164bn_cifar10', '0364', 'c0ff243801f078c6e6be72e1d3b67d88d61c4454', 'v0.0.196'), ('preresnet164bn_cifar100', '2018', '5228dfbdebf0f4699dae38a4a9b8310b08189d48', 'v0.0.192'), ('preresnet164bn_svhn', '0258', '69de71f53eee796710e11dae53f10ed276588df0', 'v0.0.269'), ('preresnet272bn_cifar10', '0325', '8f8f375dfca98fb0572b2de63ca3441888c52a88', 'v0.0.389'), ('preresnet272bn_cifar100', '1963', '52a0ebabfa75366e249e612b9556c87618acf41e', 'v0.0.389'), ('preresnet272bn_svhn', '0234', 'b2cc8842932feb8f04547d5341f00ef2a3846d8a', 'v0.0.389'), 
('preresnet542bn_cifar10', '0314', '86a2b5f51c4e8064ba3093472a65e52e4d65f6be', 'v0.0.391'), ('preresnet542bn_cifar100', '1871', 'd7343a662a78d29fe14f98e7dba6d79096f43904', 'v0.0.391'), ('preresnet542bn_svhn', '0236', '67f372d8a906e75f2aa3a32396e757851fd6e1fd', 'v0.0.391'), ('preresnet1001_cifar10', '0265', '1f3028bdf7143b8f99340b1b1a0a8e029d7020a0', 'v0.0.209'), ('preresnet1001_cifar100', '1841', 'fcbddbdb462da0d77c50026878ea2cfb6a95f5d4', 'v0.0.283'), ('preresnet1202_cifar10', '0339', 'cc2bd85a97842f7a444deb78262886a264a42c25', 'v0.0.246'), ('resnext29_32x4d_cifar10', '0315', '442eca6c30448563f931174d37796c2f08c778b7', 'v0.0.169'), ('resnext29_32x4d_cifar100', '1950', 'de139852f2876a04c74c271d50f0a50ba75ece3e', 'v0.0.200'), ('resnext29_32x4d_svhn', '0280', '0a402faba812ae0b1238a6da95adc734a5a24f16', 'v0.0.275'), ('resnext29_16x64d_cifar10', '0241', 'e80d3cb5f8d32be2025fe8fb7a7369b2d004217e', 'v0.0.176'), ('resnext29_16x64d_cifar100', '1693', '762f79b3506528f817882c3a47252c2f42e9376b', 'v0.0.322'), ('resnext29_16x64d_svhn', '0268', '04ffa5396ae4a61e60a30f86cd5180611ce94772', 'v0.0.358'), ('resnext272_1x64d_cifar10', '0255', '1ca6630049e54d9d17887c0af26ab6f848d30067', 'v0.0.372'), ('resnext272_1x64d_cifar100', '1911', '9a9b397c1091c6bd5b0f4b13fb6567a99d7aa7ac', 'v0.0.372'), ('resnext272_1x64d_svhn', '0235', 'b12f9d9ce073c72c2e5509a27a5dd065a7b5d05f', 'v0.0.372'), ('resnext272_2x32d_cifar10', '0274', '94e492a4391e589e6722a91ddc8b18df4dc89ed0', 'v0.0.375'), ('resnext272_2x32d_cifar100', '1834', 'bbc0c87cad70745f2aa86241521449ab7f9fd3bf', 'v0.0.375'), ('resnext272_2x32d_svhn', '0244', 'd9432f639120985968afc9b1bdde666ceaad53c9', 'v0.0.375'), ('seresnet20_cifar10', '0601', '143eba2ad59cc9f7e539d97445eb4fe13aad1a6e', 'v0.0.362'), ('seresnet20_cifar100', '2854', '1240e42f79500ddca2e471f543ff1aa28f20af16', 'v0.0.362'), ('seresnet20_svhn', '0323', '6c611f0a860d7a0c161602bfc268ccb8563376ee', 'v0.0.362'), ('seresnet56_cifar10', '0413', 
'66486cdbab43e244883ca8f26aa93da2297f9468', 'v0.0.362'), ('seresnet56_cifar100', '2294', 'ab7e54434bdee090f0694d3ba96122c441b7753b', 'v0.0.362'), ('seresnet56_svhn', '0264', '0a017d76364bb219b35aa2a792291acb1554e251', 'v0.0.362'), ('seresnet110_cifar10', '0363', '9a85ff9521387e1155437e691d5ccb411b28e441', 'v0.0.362'), ('seresnet110_cifar100', '2086', '298d298ea6747ff9f9277be08838f723c239e4e3', 'v0.0.362'), ('seresnet110_svhn', '0235', '525399af7c6f717aabc6c1c024c863191a1a28d9', 'v0.0.362'), ('seresnet164bn_cifar10', '0339', '4c59e76fc3264532142b37db049d3ff422b6d5f4', 'v0.0.362'), ('seresnet164bn_cifar100', '1995', 'cdac82fd3133bfd4d8cd261016a68fe95928ea4b', 'v0.0.362'), ('seresnet164bn_svhn', '0245', '31e8d2beeeb74a444ff756cafc7f1b557009cddc', 'v0.0.362'), ('seresnet272bn_cifar10', '0339', '8081d1be9a5eb985c828b6f60e41b3d689c84659', 'v0.0.390'), ('seresnet272bn_cifar100', '1907', 'a83ac8d69535cfb394be7e790ff9683d65e2b3f9', 'v0.0.390'), ('seresnet272bn_svhn', '0238', '2b28cd779296d2afbb789cee7b73a80b4b07e4a9', 'v0.0.390'), ('seresnet542bn_cifar10', '0347', 'e67d0c059a4f5c2e97790eb50d03013430f5a2fd', 'v0.0.385'), ('seresnet542bn_cifar100', '1887', 'dac530d68dff49ec37756212d3f9b52c256448fb', 'v0.0.385'), ('seresnet542bn_svhn', '0226', '9571b88bd6ac07407a453651feb29b376609933c', 'v0.0.385'), ('sepreresnet20_cifar10', '0618', 'cbc1c4df6061046a7cf99e5739a5c5df811da420', 'v0.0.379'), ('sepreresnet20_cifar100', '2831', 'e54804186c83656f8d9705ff021fd83772a0c6eb', 'v0.0.379'), ('sepreresnet20_svhn', '0324', '04dafec1e0490ecc7001a0ca9547b60ba6314956', 'v0.0.379'), ('sepreresnet56_cifar10', '0451', '0b34942c73cd2d196aa01763fb5167cb78f2b56d', 'v0.0.379'), ('sepreresnet56_cifar100', '2305', '1138b50001119765d50eeaf10a3fca15ccf6040a', 'v0.0.379'), ('sepreresnet56_svhn', '0271', '150740af292a0c5c8a6d499dfa13b2a2c5672e60', 'v0.0.379'), ('sepreresnet110_cifar10', '0454', '4c062f46d2ec615cbfc0e07af12febcddcd16364', 'v0.0.379'), ('sepreresnet110_cifar100', '2261', 
'b525d8b1568e1cad021026930f5b5283bdba8b49', 'v0.0.379'), ('sepreresnet110_svhn', '0259', 'eec4c9f3c94cad32557f0a969a8ec1d127877ab6', 'v0.0.379'), ('sepreresnet164bn_cifar10', '0373', 'e82ad7ffc78c00ad128ab4116dbd3f3eae028c19', 'v0.0.379'), ('sepreresnet164bn_cifar100', '2005', 'baf00211c3da54ddf50000629b8419da8af599d8', 'v0.0.379'), ('sepreresnet164bn_svhn', '0256', '36362d66943c89b7b7153eeaf0cfc2113369b6d5', 'v0.0.379'), ('sepreresnet272bn_cifar10', '0339', '02e141138736d647bcbdb4f0fc0d81a7bc8bef85', 'v0.0.379'), ('sepreresnet272bn_cifar100', '1913', 'd37b7af28056f42bbd11df19479cbdb0b0ac7f63', 'v0.0.379'), ('sepreresnet272bn_svhn', '0249', '44b18f81ea4ba5ec6a7ea725fc9c0798a670c161', 'v0.0.379'), ('sepreresnet542bn_cifar10', '0308', '1e726874123afc10d24cf58779347b13fdfa3b00', 'v0.0.382'), ('sepreresnet542bn_cifar100', '1945', 'aadac5fbe15f5227ff02cdf9abf3c2f27b602db4', 'v0.0.382'), ('sepreresnet542bn_svhn', '0247', 'ff5682df9a051821a4fda0a1f1fe81dbf96da479', 'v0.0.382'), ('pyramidnet110_a48_cifar10', '0372', '965fce37e26ef4e3724df869fe90283669fe9daf', 'v0.0.184'), ('pyramidnet110_a48_cifar100', '2095', 'b74f12c8d11de3ddd9fa51fe93c1903675a43a3c', 'v0.0.186'), ('pyramidnet110_a48_svhn', '0247', 'e750bd672b24bb60eca0527fd11f9866a9fc8329', 'v0.0.281'), ('pyramidnet110_a84_cifar10', '0298', '7b38a0f65de0bec2f4ceb83398fef61009a2c129', 'v0.0.185'), ('pyramidnet110_a84_cifar100', '1887', '842b3809619ec81c6e27defcad9df5c3dbc0ae55', 'v0.0.199'), ('pyramidnet110_a84_svhn', '0243', '56b06d8fd9ec043ccf5acc0b8a129bee2ef9a901', 'v0.0.392'), ('pyramidnet110_a270_cifar10', '0251', 'b3456ddd5919ef861ec607f8287bd071de0ba077', 'v0.0.194'), ('pyramidnet110_a270_cifar100', '1710', '56ae71355de25daafe34c51b91fe5b4bdab1f6ac', 'v0.0.319'), ('pyramidnet110_a270_svhn', '0238', 'fdf9f2da74bae9d4280f329554a12c9770fde52f', 'v0.0.393'), ('pyramidnet164_a270_bn_cifar10', '0242', '783e21b5856a46ee0087535776703eb7ca0c24ae', 'v0.0.264'), ('pyramidnet164_a270_bn_cifar100', '1670', 
'7614c56c52d9a6ca42d0446ab7b5c9a5e4eae63f', 'v0.0.312'), ('pyramidnet164_a270_bn_svhn', '0233', '6dcd188245b4c4edc8a1c751cd54211d26e2c603', 'v0.0.396'), ('pyramidnet200_a240_bn_cifar10', '0244', '89ae1856e23a67aac329df11775346e6bf8e00b7', 'v0.0.268'), ('pyramidnet200_a240_bn_cifar100', '1609', '0729db3729da20627c7e91bd1e9beff251f2b82c', 'v0.0.317'), ('pyramidnet200_a240_bn_svhn', '0232', 'b5876d02190e3e6a7dc7c0cd6e931e96151c34e9', 'v0.0.397'), ('pyramidnet236_a220_bn_cifar10', '0247', '6b9a29664f54d8ea82afc863670a79099e6f570a', 'v0.0.285'), ('pyramidnet236_a220_bn_cifar100', '1634', 'fd14728bc8ca8ccb205880d24d38740dad232d00', 'v0.0.312'), ('pyramidnet236_a220_bn_svhn', '0235', 'bb39a3c6f8ee25c32a40304ebf266a9521b513c4', 'v0.0.398'), ('pyramidnet272_a200_bn_cifar10', '0239', '533f8d89abe57656e1baef549dabedbc4dcefbe8', 'v0.0.284'), ('pyramidnet272_a200_bn_cifar100', '1619', '4ba0ea07d5f519878d33f7b3741f742ae12fef50', 'v0.0.312'), ('pyramidnet272_a200_bn_svhn', '0240', '2ace26878c803cc3a415d8f897bf9d3ec7f4d19c', 'v0.0.404'), ('densenet40_k12_cifar10', '0561', 'a37df881a11487fdde772254a82c20c3e45b461b', 'v0.0.193'), ('densenet40_k12_cifar100', '2490', 'd06839db7eec0331354ca31b421c6fbcd4665fd3', 'v0.0.195'), ('densenet40_k12_svhn', '0305', '8d563cdf9dcd1d4822669f6119f6e77b4e03c162', 'v0.0.278'), ('densenet40_k12_bc_cifar10', '0643', '234918e7144b95454e1417035c73391663a68401', 'v0.0.231'), ('densenet40_k12_bc_cifar100', '2841', '968e5667c29dd682a90c3f8a488e00a9efe0d29f', 'v0.0.232'), ('densenet40_k12_bc_svhn', '0320', '52bd79007dd8a8b60b9aef94a555161c9faf4b37', 'v0.0.279'), ('densenet40_k24_bc_cifar10', '0452', '3ec459af58cf2106bfcbdad090369a1f3d41ef3c', 'v0.0.220'), ('densenet40_k24_bc_cifar100', '2267', 'f744296d04d703c202b0b78cdb32e7fc40116584', 'v0.0.221'), ('densenet40_k24_bc_svhn', '0290', '268af51aaea47003c9ce128ddb76507dabb0726e', 'v0.0.280'), ('densenet40_k36_bc_cifar10', '0404', '6be4225a6d0e5fb68bdc9cda471207c0b5420395', 'v0.0.224'), 
('densenet40_k36_bc_cifar100', '2050', '49b6695fe06d98cfac5d4fdbdb716edb268712c2', 'v0.0.225'), ('densenet40_k36_bc_svhn', '0260', '47ef4d80ef3f541b795a1aee645ff9e8bada6101', 'v0.0.311'), ('densenet100_k12_cifar10', '0366', '85031735e1c80d3a6254fe8649c5e9bae2d54315', 'v0.0.205'), ('densenet100_k12_cifar100', '1964', 'f04f59203ad863f466c25fa9bbfc18686d72a46a', 'v0.0.206'), ('densenet100_k12_svhn', '0260', 'c57bbabec45492bcc4a2587443b06bf400c6ea25', 'v0.0.311'), ('densenet100_k24_cifar10', '0313', '939ef3090b6219e5afabc97f03cc34365c729ada', 'v0.0.252'), ('densenet100_k24_cifar100', '1808', '47274dd8a35bfeb77e9a077275111e4a94d561e4', 'v0.0.318'), ('densenet100_k12_bc_cifar10', '0416', '160a064165eddf492970a99b5a8ca9689bf94fea', 'v0.0.189'), ('densenet100_k12_bc_cifar100', '2119', 'a37ebc2a083fbe8e7642988945d1092fb421f182', 'v0.0.208'), ('densenet190_k40_bc_cifar10', '0252', '57f2fa706376545c260f4848a1112cd03069a323', 'v0.0.286'), ('densenet250_k24_bc_cifar10', '0267', '03b268872cdedadc7196783664b4d6e72b00ecd2', 'v0.0.290'), ('densenet250_k24_bc_cifar100', '1739', '9100f02ada0459792e3305feddda602e3278833a', 'v0.0.303'), ('xdensenet40_2_k24_bc_cifar10', '0531', 'd3c448ab2c110f873579093ff9a69e735d80b4e7', 'v0.0.226'), ('xdensenet40_2_k24_bc_cifar100', '2396', '84357bb40bcd1da5cf6237ea5755a309bcf36d49', 'v0.0.227'), ('xdensenet40_2_k24_bc_svhn', '0287', '065f384765a1eaaba26d1d9224878658cbb9cb84', 'v0.0.306'), ('xdensenet40_2_k36_bc_cifar10', '0437', 'fb6d7431c005eb9965da0e1b2872c048d6b31b30', 'v0.0.233'), ('xdensenet40_2_k36_bc_cifar100', '2165', '9ac51e902167ba05f1c21ed1a9690c1fd4cad3eb', 'v0.0.234'), ('xdensenet40_2_k36_bc_svhn', '0274', 'bf7f7de9f9b9661385a47b5e241fdc0c54287a8c', 'v0.0.306'), ('wrn16_10_cifar10', '0293', '4ac60015e3b287580d11e605793b3426e8184137', 'v0.0.166'), ('wrn16_10_cifar100', '1895', 'd6e852788e29532c8a12bb39617a2e81aba2483f', 'v0.0.204'), ('wrn16_10_svhn', '0278', 'b87185c815b64a1290ecbb7a217447906c77da75', 'v0.0.271'), ('wrn28_10_cifar10', 
'0239', 'f8a24941ca542f78eda2d192f461b1bac0600d27', 'v0.0.166'), ('wrn28_10_cifar100', '1788', '603872998b7d9f0303769cb34c4cfd16d4e09258', 'v0.0.320'), ('wrn28_10_svhn', '0271', '59f255be865678bc0d3c7dcc9785022f30265d69', 'v0.0.276'), ('wrn40_8_cifar10', '0237', '3f56f24a07be7155fb143cc4360755d564e3761a', 'v0.0.166'), ('wrn40_8_cifar100', '1803', '794aca6066fb993f2a5511df45fca58d6bc546e7', 'v0.0.321'), ('wrn40_8_svhn', '0254', '8af6aad0c2034ed8a574f74391869a0d20def51b', 'v0.0.277'), ('wrn20_10_1bit_cifar10', '0326', '3288c59a265fc3531502b9c53e33322ff74dd33f', 'v0.0.302'), ('wrn20_10_1bit_cifar100', '1904', '1c6f1917c49134da366abfbd27c1d7ad61182882', 'v0.0.302'), ('wrn20_10_1bit_svhn', '0273', '4d7bfe0dfa88d593f691b39ca9d20eb3e78636ea', 'v0.0.302'), ('wrn20_10_32bit_cifar10', '0314', '90b3fc15d99009b35b1939baefa2e2290003968a', 'v0.0.302'), ('wrn20_10_32bit_cifar100', '1812', '346f276fe7e6b61cc93482fdb3d471064d1e1de3', 'v0.0.302'), ('wrn20_10_32bit_svhn', '0259', 'af3fddd1f68f373038eea1828e7ae15d21a03ef9', 'v0.0.302'), ('ror3_56_cifar10', '0543', '7ca1b24c4a573d53484ca92b19bad5c08e38fa8b', 'v0.0.228'), ('ror3_56_cifar100', '2549', 'a7903e5f5f80bf53c07e12ce34659e0d9af4b106', 'v0.0.229'), ('ror3_56_svhn', '0269', '113859bb3c23fde05fce740647a26dca69678a34', 'v0.0.287'), ('ror3_110_cifar10', '0435', 'bf021f253fc1cf29b30a1eb579c7c4693f963933', 'v0.0.235'), ('ror3_110_cifar100', '2364', '13de922a8f8758a15eaf1d283dc42e7dcf0f3fda', 'v0.0.236'), ('ror3_110_svhn', '0257', '4b8b6963fd73753104945853a65210de84c9fb4c', 'v0.0.287'), ('ror3_164_cifar10', '0393', '7ac7b44610acdb065f40b62e94d5ec5dbb49ee11', 'v0.0.294'), ('ror3_164_cifar100', '2234', 'd5a5321048d06f554a8c7688b743c32da830372b', 'v0.0.294'), ('ror3_164_svhn', '0273', '1d0a2f127a194ea923857c1d8ec732ae5fa87300', 'v0.0.294'), ('rir_cifar10', '0328', '9780c77d0ab1c63478531557ab1aff77c208ad0d', 'v0.0.292'), ('rir_cifar100', '1923', '4bfd2f239ecca391c116cbc02d2ef7e5e2a54028', 'v0.0.292'), ('rir_svhn', '0268', 
'5240bc967aa1fc1e9df2b31919178203dcaa582a', 'v0.0.292'), ('shakeshakeresnet20_2x16d_cifar10', '0515', 'e2f524b5196951f48495973a087135ca974ec327', 'v0.0.215'), ('shakeshakeresnet20_2x16d_cifar100', '2922', '84772a31f6f6bb3228276515a8d4371c25925c85', 'v0.0.247'), ('shakeshakeresnet20_2x16d_svhn', '0317', '261fd59fcb7cf375331ce0c402ad2030b283c17c', 'v0.0.295'), ('shakeshakeresnet26_2x32d_cifar10', '0317', '5422fce187dff99fa8f4678274a8dd1519e23e27', 'v0.0.217'), ('shakeshakeresnet26_2x32d_cifar100', '1880', '750a574e738cf53079b6965410e07fb3abef82fd', 'v0.0.222'), ('shakeshakeresnet26_2x32d_svhn', '0262', '844e1f6d067b830087b9456617159a77137138f7', 'v0.0.295'), ('diaresnet20_cifar10', '0622', '1c5f4c8adeb52090b5d1ee7330f02b96d4aac843', 'v0.0.340'), ('diaresnet20_cifar100', '2771', '350c5ed4fa58bf339b8b44f19044d75ee14917cf', 'v0.0.342'), ('diaresnet20_svhn', '0323', 'f37bac8b8843319d2934a79e62c0e7365addef2f', 'v0.0.342'), ('diaresnet56_cifar10', '0505', '4073bb0c53d239a40c6cf7ee634f32096b1d54dd', 'v0.0.340'), ('diaresnet56_cifar100', '2435', '22e777d2b708b1fc8eb79e593130fa660b51dd95', 'v0.0.342'), ('diaresnet56_svhn', '0268', '7ea0022b7eff7afd1bb53e81d579e23952f9ee7f', 'v0.0.342'), ('diaresnet110_cifar10', '0410', '5d0517456f3d535722d4f3fade53146ffd8e9f5f', 'v0.0.340'), ('diaresnet110_cifar100', '2211', '4c6aa3fe0a58d54ce04061df8440b798b73c9c4b', 'v0.0.342'), ('diaresnet110_svhn', '0247', '515ce8f3ddc01b00747b839e8b52387f231f482f', 'v0.0.342'), ('diaresnet164bn_cifar10', '0350', '27cfe80d62974bfc1d3aa52e1fd1d173d5067393', 'v0.0.340'), ('diaresnet164bn_cifar100', '1953', '18aa50ab105095688597937fcafdbae1d5518597', 'v0.0.342'), ('diaresnet164bn_svhn', '0244', '4773b5183a25ef906e176079f3cae8641a167e13', 'v0.0.342'), ('diapreresnet20_cifar10', '0642', 'bfcfd5c633e563036061d10d420ea6878f102ddb', 'v0.0.343'), ('diapreresnet20_cifar100', '2837', '936a4acca4a570be185c6338e0a76c8d8cee78a9', 'v0.0.343'), ('diapreresnet20_svhn', '0303', 'd682b80f3a2f5d126eac829dc3a55d800a6e3998', 
'v0.0.343'), ('diapreresnet56_cifar10', '0483', 'd5229916f76180aa66a08d89645c1cdd1bbf4bf1', 'v0.0.343'), ('diapreresnet56_cifar100', '2505', '9867b907f721c3688bc9577e2d30e71aac14e163', 'v0.0.343'), ('diapreresnet56_svhn', '0280', '7a984a6375979ecce61576cc371ed5170a4b2cd2', 'v0.0.343'), ('diapreresnet110_cifar10', '0425', '9fab76b9a11b246b0e06386879b29196af002de5', 'v0.0.343'), ('diapreresnet110_cifar100', '2269', '0af00d413f9c7022ebec87256760b40ccb30e944', 'v0.0.343'), ('diapreresnet110_svhn', '0242', '2bab754f7a7d426eb5a1f40c3156e2c82aa145c2', 'v0.0.343'), ('diapreresnet164bn_cifar10', '0356', '7a0b124307fe307489743d8648e99239e14b764a', 'v0.0.343'), ('diapreresnet164bn_cifar100', '1999', 'a3835edf5ae8daa0383e8d13fedf3a8dc8352338', 'v0.0.343'), ('diapreresnet164bn_svhn', '0256', '30de9b3b60e03ab5c44bf7d9b571f63a9065890d', 'v0.0.343'), ('resnet10_cub', '2760', 'e8bdefb0f503d253197370a2d9d5ae772b2cb913', 'v0.0.335'), ('resnet12_cub', '2667', '22b2b21696461aa952a257014f4f0ec901375ac5', 'v0.0.336'), ('resnet14_cub', '2434', '57f6a73d2eb22d7dfc43a8ff52f25982e1b7d78b', 'v0.0.337'), ('resnet16_cub', '2321', '5e48b19f8fb8eae1afcdf04e77ae3ad9ad9c6b73', 'v0.0.338'), ('resnet18_cub', '2333', 'c32998b4b12e31b9d291770bbf3eb38490542e38', 'v0.0.344'), ('resnet26_cub', '2261', '56c8fcc12333fec68ac09c6696bb462e175be047', 'v0.0.345'), ('seresnet10_cub', '2742', 'b8e56acfe873705609c82932c321467169436531', 'v0.0.361'), ('seresnet12_cub', '2599', '9c0ee8cf33733bf5ba66eeda7394c84ed11d3d7e', 'v0.0.361'), ('seresnet14_cub', '2368', 'b58cddb7b2cc8f5c40a83912690eeff8d4d6d418', 'v0.0.361'), ('seresnet16_cub', '2318', '1d8b187c417832ac3f19806ff13f1897c7692f4f', 'v0.0.361'), ('seresnet18_cub', '2321', '7b1d02a7965a3f54606d768e0e5149148f2fb0b1', 'v0.0.361'), ('seresnet26_cub', '2254', '5cbf65d229088b3f16e396a05bde054470c14563', 'v0.0.361'), ('mobilenet_w1_cub', '2356', '02c2accf0f92fcc460cdbb6b41a581321e1fa216', 'v0.0.346'), ('proxylessnas_mobile_cub', '2190', 
'a9c66b1b9623f81105b9daf8c5e45f4501e80bbe', 'v0.0.347'), ('ntsnet_cub', '1286', '4d7595248f0fb042ef06c657d73bd0a2f3fc4f0d', 'v0.0.334'), ('pspnet_resnetd101b_voc', '7626', 'f90c0db9892ec6892623a774ba21000f7cc3995f', 'v0.0.297'), ('pspnet_resnetd50b_ade20k', '2746', '7b7ce5680fdfab567222ced11a2430cf1a452116', 'v0.0.297'), ('pspnet_resnetd101b_ade20k', '3286', 'c5e619c41740751865f662b539abbad5dd9be42b', 'v0.0.297'), ('pspnet_resnetd101b_cityscapes', '5757', '2e2315d45b83479c507a4e7a47dac6a68a8e3e1c', 'v0.0.297'), ('pspnet_resnetd101b_coco', '5467', '690335581310128a1d11fcdb0eb03ce07fb5f88d', 'v0.0.297'), ('deeplabv3_resnetd101b_voc', '7566', '6a4f805fe1433898d1dc665bb10a5620816999bd', 'v0.0.298'), ('deeplabv3_resnetd152b_voc', '7806', '1c3089b5034043e4a82567ae28b085d694e5319c', 'v0.0.298'), ('deeplabv3_resnetd50b_ade20k', '3196', '00903dce3d63fd847c36617d51907cff12834d06', 'v0.0.298'), ('deeplabv3_resnetd101b_ade20k', '3517', '46828740498741a7291fd479901dfba3d3de3b11', 'v0.0.298'), ('deeplabv3_resnetd101b_coco', '5906', '2811b3cd3512c237faef59f746d984823892d9e5', 'v0.0.298'), ('deeplabv3_resnetd152b_coco', '6107', '80ddcd964c41906f4bc104cf5b087303a06aa79f', 'v0.0.298'), ('fcn8sd_resnetd101b_voc', '8040', '3568dc41c137cbe797c1baa7b5a76669faf1ceb0', 'v0.0.299'), ('fcn8sd_resnetd50b_ade20k', '3339', '1d03bc38ea64551806ddfd4185b5eb49fb9e160f', 'v0.0.299'), ('fcn8sd_resnetd101b_ade20k', '3588', 'ff385e1913bc8c05c6abe9cb19896f477b9b75a7', 'v0.0.299'), ('fcn8sd_resnetd101b_coco', '6011', '4a469997cdc3e52c1dee1a2d58578f9df54c419b', 'v0.0.299'), ('icnet_resnetd50b_cityscapes', '6078', '04f581dc985f3d2874e8530bb70e529302e9d3dd', 'v0.0.457'), ('fastscnn_cityscapes', '6595', '6dca42601bbba8134afa11674ba606231e30f035', 'v0.0.474'), ('sinet_cityscapes', '6084', 'c0a4e992f64c042ac815b87fe8d37919a693d0ad', 'v0.0.437'), ('bisenet_resnet18_celebamaskhq', '0000', 'c3bd2251b86e4fce29a3d1fb7600c6259d4d6523', 'v0.0.462'), ('danet_resnetd50b_cityscapes', '6799', 
'dcef11be5a3e3984877c9d2b8644a630938eb25a', 'v0.0.468'), ('danet_resnetd101b_cityscapes', '6810', 'a6593e21091fb7d96989866381fe484de50a5d70', 'v0.0.468'), ('alphapose_fastseresnet101b_coco', '7415', 'c1aee8e0e4aaa1352d728ad5f147d77b9ebeff8d', 'v0.0.454'), ('simplepose_resnet18_coco', '6631', 'e267629f3da46f502914d84c10afb52a5ea12e3b', 'v0.0.455'), ('simplepose_resnet50b_coco', '7102', '78b005c871baaf5a77d7c8de41eac8ec01b7d942', 'v0.0.455'), ('simplepose_resnet101b_coco', '7244', '59f85623525928ba8601eefc81c781f0a48dd72e', 'v0.0.455'), ('simplepose_resnet152b_coco', '7253', '6228ce42852da4e01b85917f234bf74cc0962e8f', 'v0.0.455'), ('simplepose_resneta50b_coco', '7170', 'e45c65255002eb22c2aa39ff4ee4d7d1c902467c', 'v0.0.455'), ('simplepose_resneta101b_coco', '7297', '800500538da729d33bd7e141b3b7c80738b33c47', 'v0.0.455'), ('simplepose_resneta152b_coco', '7344', 'ac76d0a9dd51dcbe770ce3044567bc53f21d8fc4', 'v0.0.455'), ('simplepose_mobile_resnet18_coco', '6625', 'a5201083587dbc1f9e0b666285872f0ffcb23f88', 'v0.0.456'), ('simplepose_mobile_resnet50b_coco', '7110', '6d17c89b71fa02db4903ac4ba08922c1c267dcf5', 'v0.0.456'), ('simplepose_mobile_mobilenet_w1_coco', '6410', '14efcbbaf1be6e08448a89feb3161e572466de20', 'v0.0.456'), ('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '73b90839e07f59decdbc11cbffff196ed148e1d9', 'v0.0.456'), ('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', 'cc5169a3ac2cb3311d02bc4752abc0f799bc4492', 'v0.0.456'), ('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', 'b93dbd09bdb07fd33732aaf9e782148cbb394cd3', 'v0.0.456'), ('lwopenpose2d_mobilenet_cmupan_coco', '3999', '0a2829dcb84ea39a401dbfb6b4635d68cc1e23ca', 'v0.0.458'), ('lwopenpose3d_mobilenet_cmupan_coco', '3999', 'ef1e8e130485a5df9864db59f93ddeb892c11a46', 'v0.0.458'), ('ibppose_coco', '6487', '70158be1fc226d4b3608d02273898e887edf744a', 'v0.0.459'), ('jasperdr10x5_en', 'nana', '9e212ca84c4ecd876462c289754ab71fa845c445', 'v0.0.555'), ('jasperdr10x5_en_nr', 'nana', 
'cf4b9f5320ed72868104d0bfb44a6012be348a0b', 'v0.0.555'), ('quartznet5x5_en_ls', 'nana', '3651852c5a78ae2c9f2f58aa6936bc75db83a30a', 'v0.0.555'), ('quartznet15x5_en', 'nana', 'd41a53cf8fc87d229d7c464a034ab33347d05e8f', 'v0.0.555'), ('quartznet15x5_en_nr', 'nana', 'c73f88532c594801f76b7db921f1c7c3fcf08fc0', 'v0.0.555'), ('quartznet15x5_de', 'nana', '02b5f71ff83de19d97f6770566b0d57cc2ea1bb9', 'v0.0.555'), ('quartznet15x5_fr', 'nana', '62c42726412bd3b97a668632f78225351bf040ed', 'v0.0.555'), ('quartznet15x5_it', 'nana', '6712dfefb2f1cb997f467ffb7feb0ceb94f2a046', 'v0.0.555'), ('quartznet15x5_es', 'nana', '96f14570ed8f75d1b2ebb1e13e20dd28a128c99b', 'v0.0.555'), ('quartznet15x5_ca', 'nana', 'a8ba8cb3da5bda15a6a0d2fb30ced54459a0f0ff', 'v0.0.555'), ('quartznet15x5_pl', 'nana', 'a1ea93770043bd852a21a4e8c29c366268ce27d5', 'v0.0.555'), ('quartznet15x5_ru', 'nana', 'cb5585439804c94ef9d8d39a5bc483932fe4acd0', 'v0.0.555'), ('quartznet15x5_ru34', 'nana', 'b4dd1c93ecb01dd79276f3e13bec8d36c6249d02', 'v0.0.555'), ]} imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob' def get_model_name_suffix_data(model_name): if model_name not in _model_sha1: raise ValueError("Pretrained model for {name} is not available.".format(name=model_name)) error, sha1_hash, repo_release_tag = _model_sha1[model_name] return error, sha1_hash, repo_release_tag def get_model_file(model_name, local_model_store_dir_path=os.path.join("~", ".chainer", "models")): """ Return location for the pretrained on local file system. This function will download from online model zoo when model cannot be found or has mismatch. The root directory will be created if it doesn't exist. Parameters: ---------- model_name : str Name of the model. local_model_store_dir_path : str, default $CHAINER_HOME/models Location for keeping the model parameters. Returns: ------- file_path Path to the requested pretrained model file. 
""" error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name) short_sha1 = sha1_hash[:8] file_name = "{name}-{error}-{short_sha1}.npz".format( name=model_name, error=error, short_sha1=short_sha1) local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path) file_path = os.path.join(local_model_store_dir_path, file_name) if os.path.exists(file_path): if _check_sha1(file_path, sha1_hash): return file_path else: logging.warning("Mismatch in the content of model file detected. Downloading again.") else: logging.info("Model file not found. Downloading to {}.".format(file_path)) if not os.path.exists(local_model_store_dir_path): os.makedirs(local_model_store_dir_path) zip_file_path = file_path + ".zip" _download( url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format( repo_url=imgclsmob_repo_url, repo_release_tag=repo_release_tag, file_name=file_name), path=zip_file_path, overwrite=True) with zipfile.ZipFile(zip_file_path) as zf: zf.extractall(local_model_store_dir_path) os.remove(zip_file_path) if _check_sha1(file_path, sha1_hash): return file_path else: raise ValueError("Downloaded file has different hash. Please try again.") def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True): """Download an given URL Parameters: ---------- url : str URL to download path : str, optional Destination path to store downloaded file. By default stores to the current directory with same name as in url. overwrite : bool, optional Whether to overwrite destination file if already exists. sha1_hash : str, optional Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified but doesn't match. retries : integer, default 5 The number of times to attempt the download in case of failure or non 200 return codes verify_ssl : bool, default True Verify SSL certificates. Returns: ------- str The file path of the downloaded file. 
""" import warnings try: import requests except ImportError: class requests_failed_to_import(object): pass requests = requests_failed_to_import if path is None: fname = url.split("/")[-1] # Empty filenames are invalid assert fname, "Can't construct file-name from this URL. Please set the `path` option manually." else: path = os.path.expanduser(path) if os.path.isdir(path): fname = os.path.join(path, url.split("/")[-1]) else: fname = path assert retries >= 0, "Number of retries should be at least 0" if not verify_ssl: warnings.warn( "Unverified HTTPS request is being made (verify_ssl=False). " "Adding certificate verification is strongly advised.") if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)): dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) if not os.path.exists(dirname): os.makedirs(dirname) while retries + 1 > 0: # Disable pyling too broad Exception # pylint: disable=W0703 try: print("Downloading {} from {}...".format(fname, url)) r = requests.get(url, stream=True, verify=verify_ssl) if r.status_code != 200: raise RuntimeError("Failed downloading url {}".format(url)) with open(fname, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) if sha1_hash and not _check_sha1(fname, sha1_hash): raise UserWarning("File {} is downloaded but the content hash does not match." " The repo may be outdated or download may be incomplete. " "If the 'repo_url' is overridden, consider switching to " "the default repo.".format(fname)) break except Exception as e: retries -= 1 if retries <= 0: raise e else: print("download failed, retrying, {} attempt{} left" .format(retries, "s" if retries > 1 else "")) return fname def _check_sha1(filename, sha1_hash): """Check whether the sha1 hash of the file content matches the expected hash. Parameters: ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. 
Returns: ------- bool Whether the file content matches the expected hash. """ sha1 = hashlib.sha1() with open(filename, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash
59,290
75.901427
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/fastseresnet.py
"""
    Fast-SE-ResNet for ImageNet-1K, implemented in Chainer.
    Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""

__all__ = ['FastSEResNet', 'fastseresnet101b']

import os
from functools import partial

import chainer.functions as F
import chainer.links as L
from chainer import Chain
from chainer.serializers import load_npz

from .common import conv1x1_block, SEBlock, SimpleSequential
from .resnet import ResBlock, ResBottleneck, ResInitBlock


class FastSEResUnit(Chain):
    """
    Fast-SE-ResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 conv1_stride,
                 use_se,
                 **kwargs):
        super(FastSEResUnit, self).__init__(**kwargs)
        self.use_se = use_se
        # A projection shortcut is needed whenever the residual branch changes
        # the tensor shape (channel count or spatial resolution).
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        with self.init_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    conv1_stride=conv1_stride)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride)
            if self.use_se:
                # reduction=1 keeps the full channel count inside the SE block.
                self.se = SEBlock(
                    channels=out_channels,
                    reduction=1,
                    use_conv=False)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
            self.activ = F.relu

    def __call__(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if self.use_se:
            x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x


class FastSEResNet(Chain):
    """
    Fast-SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FastSEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", ResInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Downsample at the first unit of every stage
                            # except the first one.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            # The 'fast' variant: SE-module only in the first
                            # unit of each stage.
                            use_se = (j == 0)
                            setattr(stage, "unit{}".format(j + 1), FastSEResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                bottleneck=bottleneck,
                                conv1_stride=conv1_stride,
                                use_se=use_se))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))

            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))

    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x


def get_fastseresnet(blocks,
                     bottleneck=None,
                     conv1_stride=True,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".chainer", "models"),
                     **kwargs):
    """
    Create Fast-SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # ResNet convention: bottleneck blocks from depth 50 onwards.
        bottleneck = (blocks >= 50)

    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported Fast-SE-ResNet with number of blocks: {}".format(blocks))

    # Sanity check: the per-stage layer counts must reproduce the requested
    # total depth (3 convs per bottleneck unit, 2 per simple unit, plus the
    # initial block and the classifier).
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]

    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = FastSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)

    return net


def fastseresnet101b(**kwargs):
    """
    Fast-SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_fastseresnet(blocks=101, conv1_stride=False, model_name="fastseresnet101b", **kwargs)


def _test():
    import numpy as np
    import chainer

    chainer.global_config.train = False

    pretrained = False

    models = [
        fastseresnet101b,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != fastseresnet101b or weight_count == 55697960)

        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
9,707
31.686869
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/polynet.py
""" PolyNet for ImageNet-1K, implemented in Chainer. Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. """ __all__ = ['PolyNet', 'polynet'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import ConvBlock, conv1x1_block, conv3x3_block, Concurrent, ParametricSequential, ParametricConcurrent,\ SimpleSequential class PolyConv(Chain): """ PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules. The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception blocks inside a poly-N module. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. num_blocks : int Number of blocks (BatchNorm layers). """ def __init__(self, in_channels, out_channels, ksize, stride, pad, num_blocks): super(PolyConv, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True) for i in range(num_blocks): setattr(self, "bn{}".format(i + 1), L.BatchNormalization( size=out_channels, eps=1e-5)) self.activ = F.relu def __call__(self, x, index): x = self.conv(x) bn = getattr(self, "bn{}".format(index + 1)) x = bn(x) x = self.activ(x) return x def poly_conv1x1(in_channels, out_channels, num_blocks): """ 1x1 version of the PolyNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). """ return PolyConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=1, pad=0, num_blocks=num_blocks) class MaxPoolBranch(Chain): """ PolyNet specific max pooling branch block. """ def __init__(self): super(MaxPoolBranch, self).__init__() with self.init_scope(): self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=False) def __call__(self, x): x = self.pool(x) return x class Conv1x1Branch(Chain): """ PolyNet specific convolutional 1x1 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Conv1x1Branch, self).__init__() with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) def __call__(self, x): x = self.conv(x) return x class Conv3x3Branch(Chain): """ PolyNet specific convolutional 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Conv3x3Branch, self).__init__() with self.init_scope(): self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, pad=0) def __call__(self, x): x = self.conv(x) return x class ConvSeqBranch(Chain): """ PolyNet specific convolutional sequence branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list): super(ConvSeqBranch, self).__init__() assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) with self.init_scope(): self.conv_list = SimpleSequential() with self.conv_list.init_scope(): for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): setattr(self.conv_list, "conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=kernel_size, stride=strides, pad=padding)) in_channels = out_channels def __call__(self, x): x = self.conv_list(x) return x class PolyConvSeqBranch(Chain): """ PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. num_blocks : int Number of blocks for PolyConv. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, num_blocks): super(PolyConvSeqBranch, self).__init__() assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) with self.init_scope(): self.conv_list = ParametricSequential() with self.conv_list.init_scope(): for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): setattr(self.conv_list, "conv{}".format(i + 1), PolyConv( in_channels=in_channels, out_channels=out_channels, ksize=kernel_size, stride=strides, pad=padding, num_blocks=num_blocks)) in_channels = out_channels def __call__(self, x, index): x = self.conv_list(x, index=index) return x class TwoWayABlock(Chain): """ PolyNet type Inception-A block. """ def __init__(self): super(TwoWayABlock, self).__init__() in_channels = 384 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 48, 64), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1))) setattr(self.branches, "branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 32), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1))) setattr(self.branches, "branch3", Conv1x1Branch( in_channels=in_channels, out_channels=32)) self.conv = conv1x1_block( in_channels=128, out_channels=in_channels, activation=None) def __call__(self, x): x = self.branches(x) x = self.conv(x) return x class TwoWayBBlock(Chain): """ PolyNet type Inception-B block. 
""" def __init__(self): super(TwoWayBBlock, self).__init__() in_channels = 1152 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)))) setattr(self.branches, "branch2", Conv1x1Branch( in_channels=in_channels, out_channels=192)) self.conv = conv1x1_block( in_channels=384, out_channels=in_channels, activation=None) def __call__(self, x): x = self.branches(x) x = self.conv(x) return x class TwoWayCBlock(Chain): """ PolyNet type Inception-C block. """ def __init__(self): super(TwoWayCBlock, self).__init__() in_channels = 2048 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)))) setattr(self.branches, "branch2", Conv1x1Branch( in_channels=in_channels, out_channels=192)) self.conv = conv1x1_block( in_channels=448, out_channels=in_channels, activation=None) def __call__(self, x): x = self.branches(x) x = self.conv(x) return x class PolyPreBBlock(Chain): """ PolyNet type PolyResidual-Pre-B block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). 
""" def __init__(self, num_blocks): super(PolyPreBBlock, self).__init__() in_channels = 1152 with self.init_scope(): self.branches = ParametricConcurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), num_blocks=num_blocks)) setattr(self.branches, "branch2", poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks)) def __call__(self, x, index): x = self.branches(x, index=index) return x class PolyPreCBlock(Chain): """ PolyNet type PolyResidual-Pre-C block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). """ def __init__(self, num_blocks): super(PolyPreCBlock, self).__init__() in_channels = 2048 with self.init_scope(): self.branches = ParametricConcurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)), num_blocks=num_blocks)) setattr(self.branches, "branch2", poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks)) def __call__(self, x, index): x = self.branches(x, index=index) return x def poly_res_b_block(): """ PolyNet type PolyResidual-Res-B block. """ return conv1x1_block( in_channels=384, out_channels=1152, stride=1, activation=None) def poly_res_c_block(): """ PolyNet type PolyResidual-Res-C block. """ return conv1x1_block( in_channels=448, out_channels=2048, stride=1, activation=None) class MultiResidual(Chain): """ Base class for constructing N-way modules (2-way, 3-way, and so on). Actually it is for 2-way modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. 
res_block : Chain class Residual branch block. num_blocks : int Number of residual branches. """ def __init__(self, scale, res_block, num_blocks): super(MultiResidual, self).__init__() assert (num_blocks >= 1) self.scale = scale self.num_blocks = num_blocks with self.init_scope(): for i in range(num_blocks): setattr(self, "res_block{}".format(i + 1), res_block()) self.activ = F.relu def __call__(self, x): out = x for i in range(self.num_blocks): res_block = getattr(self, "res_block{}".format(i + 1)) out = out + self.scale * res_block(x) out = self.activ(out) return out class PolyResidual(Chain): """ The other base class for constructing N-way poly-modules. Actually it is for 3-way poly-modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. res_block : Chain class Residual branch block. num_blocks : int Number of residual branches. pre_block : Chain class Preliminary block. """ def __init__(self, scale, res_block, num_blocks, pre_block): super(PolyResidual, self).__init__() assert (num_blocks >= 1) self.scale = scale self.num_blocks = num_blocks with self.init_scope(): self.pre_block = pre_block(num_blocks=num_blocks) for i in range(num_blocks): setattr(self, "res_block{}".format(i + 1), res_block()) self.activ = F.relu def __call__(self, x): out = x for index in range(self.num_blocks): x = self.pre_block(x, index) res_block = getattr(self, "res_block{}".format(index + 1)) x = res_block(x) out = out + self.scale * x x = self.activ(x) out = self.activ(out) return out class PolyBaseUnit(Chain): """ PolyNet unit base class. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. two_way_block : Chain class Residual branch block for 2-way-stage. poly_scale : float, default 0.0 Scale value for 2-way stage. poly_res_block : Chain class, default None Residual branch block for poly-stage. poly_pre_block : Chain class, default None Preliminary branch block for poly-stage. 
""" def __init__(self, two_way_scale, two_way_block, poly_scale=0.0, poly_res_block=None, poly_pre_block=None): super(PolyBaseUnit, self).__init__() with self.init_scope(): if poly_res_block is not None: assert (poly_scale != 0.0) assert (poly_pre_block is not None) self.poly = PolyResidual( scale=poly_scale, res_block=poly_res_block, num_blocks=3, pre_block=poly_pre_block) else: assert (poly_scale == 0.0) assert (poly_pre_block is None) self.poly = None self.twoway = MultiResidual( scale=two_way_scale, res_block=two_way_block, num_blocks=2) def __call__(self, x): if self.poly is not None: x = self.poly(x) x = self.twoway(x) return x class PolyAUnit(PolyBaseUnit): """ PolyNet type A unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale=0.0): super(PolyAUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayABlock) assert (poly_scale == 0.0) class PolyBUnit(PolyBaseUnit): """ PolyNet type B unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale): super(PolyBUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayBBlock, poly_scale=poly_scale, poly_res_block=poly_res_b_block, poly_pre_block=PolyPreBBlock) class PolyCUnit(PolyBaseUnit): """ PolyNet type C unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale): super(PolyCUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayCBlock, poly_scale=poly_scale, poly_res_block=poly_res_c_block, poly_pre_block=PolyPreCBlock) class ReductionAUnit(Chain): """ PolyNet type Reduction-A unit. 
""" def __init__(self): super(ReductionAUnit, self).__init__() in_channels = 384 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 384), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0))) setattr(self.branches, "branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,))) setattr(self.branches, "branch3", MaxPoolBranch()) def __call__(self, x): x = self.branches(x) return x class ReductionBUnit(Chain): """ PolyNet type Reduction-B unit. """ def __init__(self): super(ReductionBUnit, self).__init__() in_channels = 1152 with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 256), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0))) setattr(self.branches, "branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0))) setattr(self.branches, "branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 384), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0))) setattr(self.branches, "branch4", MaxPoolBranch()) def __call__(self, x): x = self.branches(x) return x class PolyBlock3a(Chain): """ PolyNet type Mixed-3a block. """ def __init__(self): super(PolyBlock3a, self).__init__() with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", MaxPoolBranch()) setattr(self.branches, "branch2", Conv3x3Branch( in_channels=64, out_channels=96)) def __call__(self, x): x = self.branches(x) return x class PolyBlock4a(Chain): """ PolyNet type Mixed-4a block. 
""" def __init__(self): super(PolyBlock4a, self).__init__() with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", ConvSeqBranch( in_channels=160, out_channels_list=(64, 96), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 0))) setattr(self.branches, "branch2", ConvSeqBranch( in_channels=160, out_channels_list=(64, 64, 64, 96), kernel_size_list=(1, (7, 1), (1, 7), 3), strides_list=(1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), 0))) def __call__(self, x): x = self.branches(x) return x class PolyBlock5a(Chain): """ PolyNet type Mixed-5a block. """ def __init__(self): super(PolyBlock5a, self).__init__() with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", MaxPoolBranch()) setattr(self.branches, "branch2", Conv3x3Branch( in_channels=192, out_channels=192)) def __call__(self, x): x = self.branches(x) return x class PolyInitBlock(Chain): """ PolyNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PolyInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, pad=0) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, pad=0) self.conv3 = conv3x3_block( in_channels=32, out_channels=64) self.block1 = PolyBlock3a() self.block2 = PolyBlock4a() self.block3 = PolyBlock5a() def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.block1(x) x = self.block2(x) x = self.block3(x) return x class PolyNet(Chain): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. Parameters: ---------- two_way_scales : list of list of floats Two way scale values for each normal unit. poly_scales : list of list of floats Three way scale values for each normal unit. 
dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (331, 331) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, two_way_scales, poly_scales, dropout_rate=0.2, in_channels=3, in_size=(331, 331), classes=1000): super(PolyNet, self).__init__() self.in_size = in_size self.classes = classes normal_units = [PolyAUnit, PolyBUnit, PolyCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", PolyInitBlock( in_channels=in_channels)) for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)): stage = SimpleSequential() with stage.init_scope(): for j, (two_way_scale, poly_scale) in enumerate(zip( two_way_scales_per_stage, poly_scales_per_stage)): if (j == 0) and (i != 0): unit = reduction_units[i - 1] setattr(stage, "unit{}".format(j + 1), unit()) else: unit = normal_units[i] setattr(stage, "unit{}".format(j + 1), unit( two_way_scale=two_way_scale, poly_scale=poly_scale)) setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=9, stride=1)) in_channels = 2048 self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_polynet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PolyNet model with specific parameters. 
Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ two_way_scales = [ [1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769], [0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923], [0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]] poly_scales = [ [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000], [0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615], [0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]] net = PolyNet( two_way_scales=two_way_scales, poly_scales=poly_scales, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def polynet(**kwargs): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_polynet(model_name="polynet", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ polynet, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != polynet or weight_count == 95366600) x = np.zeros((1, 3, 331, 331), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
31,208
30.748728
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnet_cifar.py
""" ResNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn', 'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn', 'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn', 'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn', 'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn', 'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn', 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn', 'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential from .resnet import ResUnit class CIFARResNet(Chain): """ ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10): super(CIFARResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resnet20_cifar10(classes=10, **kwargs): """ ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar10", **kwargs) def resnet20_cifar100(classes=100, **kwargs): """ ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar100", **kwargs) def resnet20_svhn(classes=10, **kwargs): """ ResNet-20 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_svhn", **kwargs) def resnet56_cifar10(classes=10, **kwargs): """ ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar10", **kwargs) def resnet56_cifar100(classes=100, **kwargs): """ ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar100", **kwargs) def resnet56_svhn(classes=10, **kwargs): """ ResNet-56 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_svhn", **kwargs) def resnet110_cifar10(classes=10, **kwargs): """ ResNet-110 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar10", **kwargs) def resnet110_cifar100(classes=100, **kwargs): """ ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar100", **kwargs) def resnet110_svhn(classes=10, **kwargs): """ ResNet-110 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_svhn", **kwargs) def resnet164bn_cifar10(classes=10, **kwargs): """ ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar10", **kwargs) def resnet164bn_cifar100(classes=100, **kwargs): """ ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar100", **kwargs) def resnet164bn_svhn(classes=10, **kwargs): """ ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_svhn", **kwargs) def resnet272bn_cifar10(classes=10, **kwargs): """ ResNet-272(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar10", **kwargs) def resnet272bn_cifar100(classes=100, **kwargs): """ ResNet-272(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar100", **kwargs) def resnet272bn_svhn(classes=10, **kwargs): """ ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_svhn", **kwargs) def resnet542bn_cifar10(classes=10, **kwargs): """ ResNet-542(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar10", **kwargs) def resnet542bn_cifar100(classes=100, **kwargs): """ ResNet-542(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar100", **kwargs) def resnet542bn_svhn(classes=10, **kwargs): """ ResNet-542(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs) def resnet1001_cifar10(classes=10, **kwargs): """ ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar10", **kwargs) def resnet1001_cifar100(classes=100, **kwargs): """ ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar100", **kwargs) def resnet1001_svhn(classes=10, **kwargs): """ ResNet-1001 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_svhn", **kwargs) def resnet1202_cifar10(classes=10, **kwargs): """ ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar10", **kwargs) def resnet1202_cifar100(classes=100, **kwargs): """ ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar100", **kwargs) def resnet1202_svhn(classes=10, **kwargs): """ ResNet-1202 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (resnet20_cifar10, 10), (resnet20_cifar100, 100), (resnet20_svhn, 10), (resnet56_cifar10, 10), (resnet56_cifar100, 100), (resnet56_svhn, 10), (resnet110_cifar10, 10), (resnet110_cifar100, 100), (resnet110_svhn, 10), (resnet164bn_cifar10, 10), (resnet164bn_cifar100, 100), (resnet164bn_svhn, 10), (resnet272bn_cifar10, 10), (resnet272bn_cifar100, 100), (resnet272bn_svhn, 10), (resnet542bn_cifar10, 10), (resnet542bn_cifar100, 100), (resnet542bn_svhn, 10), (resnet1001_cifar10, 10), (resnet1001_cifar100, 100), (resnet1001_svhn, 10), (resnet1202_cifar10, 10), (resnet1202_cifar100, 100), (resnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet20_cifar10 or weight_count == 272474) assert (model != resnet20_cifar100 or weight_count == 278324) assert (model != resnet20_svhn or weight_count == 272474) assert (model != resnet56_cifar10 or weight_count == 855770) assert (model != resnet56_cifar100 or weight_count == 861620) assert (model != resnet56_svhn or weight_count == 855770) assert (model != resnet110_cifar10 or weight_count == 1730714) assert (model != resnet110_cifar100 or 
weight_count == 1736564) assert (model != resnet110_svhn or weight_count == 1730714) assert (model != resnet164bn_cifar10 or weight_count == 1704154) assert (model != resnet164bn_cifar100 or weight_count == 1727284) assert (model != resnet164bn_svhn or weight_count == 1704154) assert (model != resnet272bn_cifar10 or weight_count == 2816986) assert (model != resnet272bn_cifar100 or weight_count == 2840116) assert (model != resnet272bn_svhn or weight_count == 2816986) assert (model != resnet542bn_cifar10 or weight_count == 5599066) assert (model != resnet542bn_cifar100 or weight_count == 5622196) assert (model != resnet542bn_svhn or weight_count == 5599066) assert (model != resnet1001_cifar10 or weight_count == 10328602) assert (model != resnet1001_cifar100 or weight_count == 10351732) assert (model != resnet1001_svhn or weight_count == 10328602) assert (model != resnet1202_cifar10 or weight_count == 19424026) assert (model != resnet1202_cifar100 or weight_count == 19429876) assert (model != resnet1202_svhn or weight_count == 19424026) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
23,007
35.520635
120
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/nasnet.py
""" NASNet-A for ImageNet-1K, implemented in Chainer. Original paper: 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. """ __all__ = ['NASNet', 'nasnet_4a1056', 'nasnet_6a4032', 'nasnet_dual_path_sequential'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, SimpleSequential, DualPathSequential class NasDualPathScheme(object): """ NASNet specific scheme of dual path response for a module in a DualPathSequential module. Parameters: ---------- can_skip_input : bool Whether can skip input for some blocks. """ def __init__(self, can_skip_input): super(NasDualPathScheme, self).__init__() self.can_skip_input = can_skip_input """ Scheme function. Parameters: ---------- block : Chain A block. x : Tensor Current processed tensor. x_prev : Tensor Previous processed tensor. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ def __call__(self, block, x, x_prev): x_next = block(x, x_prev) if type(x_next) == tuple: x_next, x = x_next if self.can_skip_input and hasattr(block, 'skip_input') and block.skip_input: x = x_prev return x_next, x def nasnet_dual_path_scheme_ordinal(block, x, _): """ NASNet specific scheme of dual path response for an ordinal block with dual inputs/outputs in a DualPathSequential block. Parameters: ---------- block : Chain A block. x : Tensor Current processed tensor. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ return block(x), x def nasnet_dual_path_sequential(return_two=True, first_ordinals=0, last_ordinals=0, can_skip_input=False): """ NASNet specific dual path sequential container. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. 
first_ordinals : int, default 0 Number of the first blocks with single input/output. last_ordinals : int, default 0 Number of the final blocks with single input/output. dual_path_scheme : function Scheme of dual path response for a block. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal block. can_skip_input : bool, default False Whether can skip input for some blocks. """ return DualPathSequential( return_two=return_two, first_ordinals=first_ordinals, last_ordinals=last_ordinals, dual_path_scheme=NasDualPathScheme(can_skip_input=can_skip_input), dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal) def nasnet_batch_norm(channels): """ NASNet specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. """ return L.BatchNormalization( size=channels, decay=0.1, eps=0.001) def nasnet_avgpool1x1_s2(): """ NASNet specific 1x1 Average pooling layer with stride 2. """ return partial( F.average_pooling_2d, ksize=1, stride=2) def nasnet_avgpool3x3_s1(): """ NASNet specific 3x3 Average pooling layer with stride 1. """ return partial( F.average_pooling_nd, ksize=3, stride=1, pad=1, pad_value=None) def nasnet_avgpool3x3_s2(): """ NASNet specific 3x3 Average pooling layer with stride 2. """ return partial( F.average_pooling_nd, ksize=3, stride=2, pad=1, pad_value=None) def process_with_padding(x, process=(lambda x: x), pad_width=((0, 0), (0, 0), (1, 0), (1, 0))): """ Auxiliary decorator for layer with NASNet specific extra padding. Parameters: ---------- x : chainer.Variable or numpy.ndarray or cupy.ndarray Input tensor. process : function, default (lambda x: x) a decorated layer pad_width : tuple of tuple of int, default ((0, 0), (0, 0), (1, 0), (1, 0)) Whether the layer uses a bias vector. Returns: ------- chainer.Variable or numpy.ndarray or cupy.ndarray Resulted tensor. 
""" x = F.pad(x, pad_width=pad_width, mode="constant", constant_values=0) x = process(x) x = x[:, :, 1:, 1:] return x class NasMaxPoolBlock(Chain): """ NASNet specific Max pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, extra_padding=False): super(NasMaxPoolBlock, self).__init__() self.extra_padding = extra_padding with self.init_scope(): self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): if self.extra_padding: x = process_with_padding(x, self.pool) else: x = self.pool(x) return x class NasAvgPoolBlock(Chain): """ NASNet specific 3x3 Average pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, extra_padding=False): super(NasAvgPoolBlock, self).__init__() self.extra_padding = extra_padding with self.init_scope(): self.pool = partial( F.average_pooling_nd, ksize=3, stride=2, pad=1, pad_value=None) def __call__(self, x): if self.extra_padding: x = process_with_padding(x, self.pool) else: x = self.pool(x) return x class NasConv(Chain): """ NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, groups): super(NasConv, self).__init__() with self.init_scope(): self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, groups=groups) self.bn = nasnet_batch_norm(channels=out_channels) def __call__(self, x): x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def nas_conv1x1(in_channels, out_channels): """ 1x1 version of the NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return NasConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=1, pad=0, groups=1) class DwsConv(Chain): """ Standard depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. use_bias : bool, default False Whether the layers use a bias vector. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, use_bias=False): super(DwsConv, self).__init__() with self.init_scope(): self.dw_conv = L.Convolution2D( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), groups=in_channels) self.pw_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias) def __call__(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class NasDwsConv(Chain): """ NASNet specific depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. 
pad : int or tuple/list of 2 int Padding value for convolution layer. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, extra_padding=False): super(NasDwsConv, self).__init__() self.extra_padding = extra_padding with self.init_scope(): self.activ = F.relu self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, use_bias=False) self.bn = nasnet_batch_norm(channels=out_channels) def __call__(self, x): x = self.activ(x) if self.extra_padding: x = process_with_padding(x, self.conv) else: x = self.conv(x) x = self.bn(x) return x class DwsBranch(Chain): """ NASNet specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. ding : int or tuple/list of 2 int Padding value for convolution layer. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, extra_padding=False, stem=False): super(DwsBranch, self).__init__() assert (not stem) or (not extra_padding) mid_channels = out_channels if stem else in_channels with self.init_scope(): self.conv1 = NasDwsConv( in_channels=in_channels, out_channels=mid_channels, ksize=ksize, stride=stride, pad=pad, extra_padding=extra_padding) self.conv2 = NasDwsConv( in_channels=mid_channels, out_channels=out_channels, ksize=ksize, stride=1, pad=pad) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x def dws_branch_k3_s1_p1(in_channels, out_channels, extra_padding=False): """ 3x3/1/1 version of the NASNet specific depthwise separable convolution branch. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=1, pad=1, extra_padding=extra_padding) def dws_branch_k5_s1_p2(in_channels, out_channels, extra_padding=False): """ 5x5/1/2 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=1, pad=2, extra_padding=extra_padding) def dws_branch_k5_s2_p2(in_channels, out_channels, extra_padding=False, stem=False): """ 5x5/2/2 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=5, stride=2, pad=2, extra_padding=extra_padding, stem=stem) def dws_branch_k7_s2_p3(in_channels, out_channels, extra_padding=False, stem=False): """ 7x7/2/3 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, ksize=7, stride=2, pad=3, extra_padding=extra_padding, stem=stem) class NasPathBranch(Chain): """ NASNet specific `path` branch (auxiliary block). 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, in_channels, out_channels, extra_padding=False): super(NasPathBranch, self).__init__() self.extra_padding = extra_padding with self.init_scope(): self.avgpool = nasnet_avgpool1x1_s2() self.conv = conv1x1( in_channels=in_channels, out_channels=out_channels) def __call__(self, x): if self.extra_padding: x = process_with_padding(x, pad_width=((0, 0), (0, 0), (0, 1), (0, 1))) x = self.avgpool(x) x = self.conv(x) return x class NasPathBlock(Chain): """ NASNet specific `path` block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(NasPathBlock, self).__init__() mid_channels = out_channels // 2 with self.init_scope(): self.activ = F.relu self.path1 = NasPathBranch( in_channels=in_channels, out_channels=mid_channels) self.path2 = NasPathBranch( in_channels=in_channels, out_channels=mid_channels, extra_padding=True) self.bn = nasnet_batch_norm(channels=out_channels) def __call__(self, x): x = self.activ(x) x1 = self.path1(x) x2 = self.path2(x) x = F.concat((x1, x2), axis=1) x = self.bn(x) return x class Stem1Unit(Chain): """ NASNet Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(Stem1Unit, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb0_right = dws_branch_k7_s2_p3( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb1_left = NasMaxPoolBlock(extra_padding=False) self.comb1_right = dws_branch_k7_s2_p3( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb2_left = nasnet_avgpool3x3_s2() self.comb2_right = dws_branch_k5_s2_p2( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb4_right = NasMaxPoolBlock(extra_padding=False) def __call__(self, x, _=None): x_left = self.conv1x1(x) x_right = x x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_left) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + self.comb2_right(x_right) x3 = x1 + self.comb3_right(x0) x4 = self.comb4_left(x0) + self.comb4_right(x_left) x_out = F.concat((x1, x2, x3, x4), axis=1) return x_out class Stem2Unit(Chain): """ NASNet Stem2 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool Whether to use extra padding. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding): super(Stem2Unit, self).__init__() mid_channels = out_channels // 4 with self.init_scope(): self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding) self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding) self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding) def __call__(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.path(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_left) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + self.comb2_right(x_right) x3 = x1 + self.comb3_right(x0) x4 = self.comb4_left(x0) + self.comb4_right(x_left) x_out = F.concat((x1, x2, x3, x4), axis=1) return x_out class FirstUnit(Chain): """ NASNet First unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, prev_in_channels, out_channels): super(FirstUnit, self).__init__() mid_channels = out_channels // 6 with self.init_scope(): self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb2_left = nasnet_avgpool3x3_s1() self.comb3_left = nasnet_avgpool3x3_s1() self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) def __call__(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.path(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + x_right x3 = self.comb3_left(x_right) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + x_left x_out = F.concat((x_right, x0, x1, x2, x3, x4), axis=1) return x_out class NormalUnit(Chain): """ NASNet Normal unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, prev_in_channels, out_channels): super(NormalUnit, self).__init__() mid_channels = out_channels // 6 with self.init_scope(): self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb2_left = nasnet_avgpool3x3_s1() self.comb3_left = nasnet_avgpool3x3_s1() self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) def __call__(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.conv1x1_prev(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + x_right x3 = self.comb3_left(x_right) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + x_left x_out = F.concat((x_right, x0, x1, x2, x3, x4), axis=1) return x_out class ReductionBaseUnit(Chain): """ NASNet Reduction base unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool, default True Whether to use extra padding. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding=True): super(ReductionBaseUnit, self).__init__() self.skip_input = True mid_channels = out_channels // 4 with self.init_scope(): self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding) self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding) self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding) def __call__(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.conv1x1_prev(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_left) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + self.comb2_right(x_right) x3 = x1 + self.comb3_right(x0) x4 = self.comb4_left(x0) + self.comb4_right(x_left) x_out = F.concat((x1, x2, x3, x4), axis=1) return x_out class Reduction1Unit(ReductionBaseUnit): """ NASNet Reduction1 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, prev_in_channels, out_channels): super(Reduction1Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=True) class Reduction2Unit(ReductionBaseUnit): """ NASNet Reduction2 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool Whether to use extra padding. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding): super(Reduction2Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding) class NASNetInitBlock(Chain): """ NASNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(NASNetInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=2, pad=0, nobias=True) self.bn = nasnet_batch_norm(channels=out_channels) def __call__(self, x): x = self.conv(x) x = self.bn(x) return x class NASNet(Chain): """ NASNet-A model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. stem_blocks_channels : list of 2 int Number of output channels for the Stem units. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, stem_blocks_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_channels=3, in_size=(224, 224), classes=1000): super(NASNet, self).__init__() self.in_size = in_size self.classes = classes reduction_units = [Reduction1Unit, Reduction2Unit] with self.init_scope(): self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=1, last_ordinals=2) with self.features.init_scope(): setattr(self.features, "init_block", NASNetInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels out_channels = stem_blocks_channels[0] setattr(self.features, "stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=out_channels)) prev_in_channels = in_channels in_channels = out_channels out_channels = stem_blocks_channels[1] setattr(self.features, "stem2_unit", Stem2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding)) prev_in_channels = in_channels in_channels = out_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential(can_skip_input=skip_reduction_layer_input) with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] elif ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)): unit = FirstUnit else: unit = NormalUnit if unit == Reduction2Unit: setattr(stage, "unit{}".format(j + 1), Reduction2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding)) else: setattr(stage, "unit{}".format(j + 1), unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels)) prev_in_channels = in_channels in_channels = out_channels 
setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_activ", F.relu) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=final_pool_size, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=0.5)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_nasnet(repeat, penultimate_filters, init_block_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_size, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create NASNet-A model with specific parameters. Parameters: ---------- repeat : int NNumber of cell repeats. penultimate_filters : int Number of filters in the penultimate layer of the network. init_block_channels : int Number of output channels for the initial unit. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_size : tuple of two ints Spatial size of the expected input image. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" stem_blocks_channels = [1, 2] reduct_channels = [[], [8], [16]] norm_channels = [6, 12, 24] channels = [rci + [nci] * repeat for rci, nci in zip(reduct_channels, norm_channels)] base_channel_chunk = penultimate_filters // channels[-1][-1] stem_blocks_channels = [(ci * base_channel_chunk) for ci in stem_blocks_channels] channels = [[(cij * base_channel_chunk) for cij in ci] for ci in channels] net = NASNet( channels=channels, init_block_channels=init_block_channels, stem_blocks_channels=stem_blocks_channels, final_pool_size=final_pool_size, extra_padding=extra_padding, skip_reduction_layer_input=skip_reduction_layer_input, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def nasnet_4a1056(**kwargs): """ NASNet-A 4@1056 (NASNet-A-Mobile) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_nasnet( repeat=4, penultimate_filters=1056, init_block_channels=32, final_pool_size=7, extra_padding=True, skip_reduction_layer_input=False, in_size=(224, 224), model_name="nasnet_4a1056", **kwargs) def nasnet_6a4032(**kwargs): """ NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_nasnet( repeat=6, penultimate_filters=4032, init_block_channels=96, final_pool_size=11, extra_padding=False, skip_reduction_layer_input=True, in_size=(331, 331), model_name="nasnet_6a4032", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ nasnet_4a1056, nasnet_6a4032, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != nasnet_4a1056 or weight_count == 5289978) assert (model != nasnet_6a4032 or weight_count == 88753150) x = np.zeros((1, 3, net.in_size[0], net.in_size[1]), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
40,375
29.244195
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/resnext_cifar.py
""" ResNeXt for CIFAR/SVHN, implemented in Chainer. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['CIFARResNeXt', 'resnext20_16x4d_cifar10', 'resnext20_16x4d_cifar100', 'resnext20_16x4d_svhn', 'resnext20_32x2d_cifar10', 'resnext20_32x2d_cifar100', 'resnext20_32x2d_svhn', 'resnext20_32x4d_cifar10', 'resnext20_32x4d_cifar100', 'resnext20_32x4d_svhn', 'resnext29_32x4d_cifar10', 'resnext29_32x4d_cifar100', 'resnext29_32x4d_svhn', 'resnext29_16x64d_cifar10', 'resnext29_16x64d_cifar100', 'resnext29_16x64d_svhn', 'resnext272_1x64d_cifar10', 'resnext272_1x64d_cifar100', 'resnext272_1x64d_svhn', 'resnext272_2x32d_cifar10', 'resnext272_2x32d_cifar100', 'resnext272_2x32d_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, SimpleSequential from .resnext import ResNeXtUnit class CIFARResNeXt(Chain): """ ResNeXt model for CIFAR from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(32, 32), classes=10): super(CIFARResNeXt, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_resnext_cifar(classes, blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ ResNeXt model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" assert (blocks - 2) % 9 == 0 layers = [(blocks - 2) // 9] * 3 channels_per_layers = [256, 512, 1024] init_block_channels = 64 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def resnext20_16x4d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar10", **kwargs) def resnext20_16x4d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar100", **kwargs) def resnext20_16x4d_svhn(classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_svhn", **kwargs) def resnext20_32x2d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar10", **kwargs) def resnext20_32x2d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar100", **kwargs) def resnext20_32x2d_svhn(classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_svhn", **kwargs) def resnext20_32x4d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar10", **kwargs) def resnext20_32x4d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar100", **kwargs) def resnext20_32x4d_svhn(classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_svhn", **kwargs) def resnext29_32x4d_cifar10(classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar10", **kwargs) def resnext29_32x4d_cifar100(classes=100, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar100", **kwargs) def resnext29_32x4d_svhn(classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_svhn", **kwargs) def resnext29_16x64d_cifar10(classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar10", **kwargs) def resnext29_16x64d_cifar100(classes=100, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar100", **kwargs) def resnext29_16x64d_svhn(classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_svhn", **kwargs) def resnext272_1x64d_cifar10(classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar10", **kwargs) def resnext272_1x64d_cifar100(classes=100, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar100", **kwargs) def resnext272_1x64d_svhn(classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_svhn", **kwargs) def resnext272_2x32d_cifar10(classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar10", **kwargs) def resnext272_2x32d_cifar100(classes=100, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar100", **kwargs) def resnext272_2x32d_svhn(classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (resnext20_16x4d_cifar10, 10), (resnext20_16x4d_cifar100, 100), (resnext20_16x4d_svhn, 10), (resnext20_32x2d_cifar10, 10), (resnext20_32x2d_cifar100, 100), (resnext20_32x2d_svhn, 10), (resnext20_32x4d_cifar10, 10), (resnext20_32x4d_cifar100, 100), (resnext20_32x4d_svhn, 10), (resnext29_32x4d_cifar10, 10), (resnext29_32x4d_cifar100, 100), (resnext29_32x4d_svhn, 10), (resnext29_16x64d_cifar10, 10), (resnext29_16x64d_cifar100, 100), (resnext29_16x64d_svhn, 10), (resnext272_1x64d_cifar10, 10), (resnext272_1x64d_cifar100, 100), (resnext272_1x64d_svhn, 10), (resnext272_2x32d_cifar10, 10), (resnext272_2x32d_cifar100, 100), (resnext272_2x32d_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext20_16x4d_cifar10 or weight_count == 1995082) assert (model != resnext20_16x4d_cifar100 or weight_count == 2087332) assert (model != resnext20_16x4d_svhn or weight_count == 1995082) assert (model != resnext20_32x2d_cifar10 or weight_count == 1946698) assert (model != resnext20_32x2d_cifar100 or weight_count == 2038948) assert 
(model != resnext20_32x2d_svhn or weight_count == 1946698) assert (model != resnext20_32x4d_cifar10 or weight_count == 3295562) assert (model != resnext20_32x4d_cifar100 or weight_count == 3387812) assert (model != resnext20_32x4d_svhn or weight_count == 3295562) assert (model != resnext29_32x4d_cifar10 or weight_count == 4775754) assert (model != resnext29_32x4d_cifar100 or weight_count == 4868004) assert (model != resnext29_32x4d_svhn or weight_count == 4775754) assert (model != resnext29_16x64d_cifar10 or weight_count == 68155210) assert (model != resnext29_16x64d_cifar100 or weight_count == 68247460) assert (model != resnext29_16x64d_svhn or weight_count == 68155210) assert (model != resnext272_1x64d_cifar10 or weight_count == 44540746) assert (model != resnext272_1x64d_cifar100 or weight_count == 44632996) assert (model != resnext272_1x64d_svhn or weight_count == 44540746) assert (model != resnext272_2x32d_cifar10 or weight_count == 32928586) assert (model != resnext272_2x32d_cifar100 or weight_count == 33020836) assert (model != resnext272_2x32d_svhn or weight_count == 32928586) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
22,926
37.275459
116
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/densenet_cifar.py
""" DenseNet for CIFAR/SVHN, implemented in Chainer. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_svhn', 'densenet40_k12_bc_cifar10', 'densenet40_k12_bc_cifar100', 'densenet40_k12_bc_svhn', 'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100', 'densenet40_k24_bc_svhn', 'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet40_k36_bc_svhn', 'densenet100_k12_cifar10', 'densenet100_k12_cifar100', 'densenet100_k12_svhn', 'densenet100_k24_cifar10', 'densenet100_k24_cifar100', 'densenet100_k24_svhn', 'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100', 'densenet100_k12_bc_svhn', 'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet190_k40_bc_svhn', 'densenet250_k24_bc_cifar10', 'densenet250_k24_bc_cifar100', 'densenet250_k24_bc_svhn'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3, pre_conv3x3_block, SimpleSequential from .preresnet import PreResActivation from .densenet import DenseUnit, TransitionBlock class DenseSimpleUnit(Chain): """ DenseNet simple unit for CIFAR. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. 
""" def __init__(self, in_channels, out_channels, dropout_rate): super(DenseSimpleUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) inc_channels = out_channels - in_channels with self.init_scope(): self.conv = pre_conv3x3_block( in_channels=in_channels, out_channels=inc_channels) if self.use_dropout: self.dropout = partial( F.dropout, ratio=dropout_rate) def __call__(self, x): identity = x x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = F.concat((identity, x), axis=1) return x class CIFARDenseNet(Chain): """ DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(32, 32), classes=10): super(CIFARDenseNet, self).__init__() self.in_size = in_size self.classes = classes unit_class = DenseUnit if bottleneck else DenseSimpleUnit with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), unit_class( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "post_activ", PreResActivation(in_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=8, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_densenet_cifar(classes, blocks, growth_rate, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DenseNet model for CIFAR-10 with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. growth_rate : int Growth rate. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 else: assert ((blocks - 4) % 3 == 0) layers = [(blocks - 4) // 3] * 3 init_block_channels = 2 * growth_rate from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = CIFARDenseNet( channels=channels, init_block_channels=init_block_channels, classes=classes, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def densenet40_k12_cifar10(classes=10, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar10", **kwargs) def densenet40_k12_cifar100(classes=100, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar100", **kwargs) def densenet40_k12_svhn(classes=10, **kwargs): """ DenseNet-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_svhn", **kwargs) def densenet40_k12_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar10", **kwargs) def densenet40_k12_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar100", **kwargs) def densenet40_k12_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_svhn", **kwargs) def densenet40_k24_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar10", **kwargs) def densenet40_k24_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar100", **kwargs) def densenet40_k24_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_svhn", **kwargs) def densenet40_k36_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar10", **kwargs) def densenet40_k36_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar100", **kwargs) def densenet40_k36_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_svhn", **kwargs) def densenet100_k12_cifar10(classes=10, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar10", **kwargs) def densenet100_k12_cifar100(classes=100, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar100", **kwargs) def densenet100_k12_svhn(classes=10, **kwargs): """ DenseNet-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_svhn", **kwargs) def densenet100_k24_cifar10(classes=10, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar10", **kwargs) def densenet100_k24_cifar100(classes=100, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar100", **kwargs) def densenet100_k24_svhn(classes=10, **kwargs): """ DenseNet-100 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_svhn", **kwargs) def densenet100_k12_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar10", **kwargs) def densenet100_k12_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar100", **kwargs) def densenet100_k12_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_svhn", **kwargs) def densenet190_k40_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar10", **kwargs) def densenet190_k40_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar100", **kwargs) def densenet190_k40_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_svhn", **kwargs) def densenet250_k24_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar10", **kwargs) def densenet250_k24_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar100", **kwargs) def densenet250_k24_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_svhn", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ (densenet40_k12_cifar10, 10), (densenet40_k12_cifar100, 100), (densenet40_k12_svhn, 10), (densenet40_k12_bc_cifar10, 10), (densenet40_k12_bc_cifar100, 100), (densenet40_k12_bc_svhn, 10), (densenet40_k24_bc_cifar10, 10), (densenet40_k24_bc_cifar100, 100), (densenet40_k24_bc_svhn, 10), (densenet40_k36_bc_cifar10, 10), (densenet40_k36_bc_cifar100, 100), (densenet40_k36_bc_svhn, 10), (densenet100_k12_cifar10, 10), (densenet100_k12_cifar100, 100), (densenet100_k12_svhn, 10), (densenet100_k24_cifar10, 10), (densenet100_k24_cifar100, 100), (densenet100_k24_svhn, 10), (densenet100_k12_bc_cifar10, 10), (densenet100_k12_bc_cifar100, 100), (densenet100_k12_bc_svhn, 10), (densenet190_k40_bc_cifar10, 10), (densenet190_k40_bc_cifar100, 100), (densenet190_k40_bc_svhn, 10), (densenet250_k24_bc_cifar10, 10), (densenet250_k24_bc_cifar100, 100), (densenet250_k24_bc_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet40_k12_cifar10 or weight_count == 599050) assert (model != densenet40_k12_cifar100 or weight_count == 622360) assert (model != 
densenet40_k12_svhn or weight_count == 599050) assert (model != densenet40_k12_bc_cifar10 or weight_count == 176122) assert (model != densenet40_k12_bc_cifar100 or weight_count == 188092) assert (model != densenet40_k12_bc_svhn or weight_count == 176122) assert (model != densenet40_k24_bc_cifar10 or weight_count == 690346) assert (model != densenet40_k24_bc_cifar100 or weight_count == 714196) assert (model != densenet40_k24_bc_svhn or weight_count == 690346) assert (model != densenet40_k36_bc_cifar10 or weight_count == 1542682) assert (model != densenet40_k36_bc_cifar100 or weight_count == 1578412) assert (model != densenet40_k36_bc_svhn or weight_count == 1542682) assert (model != densenet100_k12_cifar10 or weight_count == 4068490) assert (model != densenet100_k12_cifar100 or weight_count == 4129600) assert (model != densenet100_k12_svhn or weight_count == 4068490) assert (model != densenet100_k24_cifar10 or weight_count == 16114138) assert (model != densenet100_k24_cifar100 or weight_count == 16236268) assert (model != densenet100_k24_svhn or weight_count == 16114138) assert (model != densenet100_k12_bc_cifar10 or weight_count == 769162) assert (model != densenet100_k12_bc_cifar100 or weight_count == 800032) assert (model != densenet100_k12_bc_svhn or weight_count == 769162) assert (model != densenet190_k40_bc_cifar10 or weight_count == 25624430) assert (model != densenet190_k40_bc_cifar100 or weight_count == 25821620) assert (model != densenet190_k40_bc_svhn or weight_count == 25624430) assert (model != densenet250_k24_bc_cifar10 or weight_count == 15324406) assert (model != densenet250_k24_bc_cifar100 or weight_count == 15480556) assert (model != densenet250_k24_bc_svhn or weight_count == 15324406) x = np.zeros((1, 3, 32, 32), np.float32) y = net(x) assert (y.shape == (1, classes)) if __name__ == "__main__": _test()
29,342
36.910853
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/bninception.py
""" BN-Inception for ImageNet-1K, implemented in Chainer. Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. """ __all__ = ['BNInception', 'bninception'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, conv7x7_block, Concurrent, SimpleSequential class Inception3x3Branch(Chain): """ BN-Inception 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Stride of the second convolution. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. """ def __init__(self, in_channels, out_channels, mid_channels, stride=1, use_bias=True, use_bn=True): super(Inception3x3Branch, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class InceptionDouble3x3Branch(Chain): """ BN-Inception double 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Stride of the second convolution. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. 
""" def __init__(self, in_channels, out_channels, mid_channels, stride=1, use_bias=True, use_bn=True): super(InceptionDouble3x3Branch, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, use_bn=use_bn) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class InceptionPoolBranch(Chain): """ BN-Inception avg-pool branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. avg_pool : bool Whether use average pooling or max pooling. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. """ def __init__(self, in_channels, out_channels, avg_pool, use_bias, use_bn): super(InceptionPoolBranch, self).__init__() with self.init_scope(): if avg_pool: self.pool = partial( F.average_pooling_2d, ksize=3, stride=1, pad=1) else: self.pool = partial( F.max_pooling_2d, ksize=3, stride=1, pad=1, cover_all=True) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn) def __call__(self, x): x = self.pool(x) x = self.conv(x) return x class StemBlock(Chain): """ BN-Inception stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. 
""" def __init__(self, in_channels, out_channels, mid_channels, use_bias, use_bn): super(StemBlock, self).__init__() with self.init_scope(): self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid_channels, stride=2, use_bias=use_bias, use_bn=use_bn) self.pool1 = partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=True) self.conv2 = Inception3x3Branch( in_channels=mid_channels, out_channels=out_channels, mid_channels=mid_channels, use_bias=use_bias, use_bn=use_bn) self.pool2 = partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=True) def __call__(self, x): x = self.conv1(x) x = self.pool1(x) x = self.conv2(x) x = self.pool2(x) return x class InceptionBlock(Chain): """ BN-Inception unit. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. avg_pool : bool Whether use average pooling or max pooling. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. 
""" def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, avg_pool, use_bias, use_bn): super(InceptionBlock, self).__init__() assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", conv1x1_block( in_channels=in_channels, out_channels=mid2_channels_list[0], use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "branch2", Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "branch3", InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "branch4", InceptionPoolBranch( in_channels=in_channels, out_channels=mid2_channels_list[3], avg_pool=avg_pool, use_bias=use_bias, use_bn=use_bn)) def __call__(self, x): x = self.branches(x) return x class ReductionBlock(Chain): """ BN-Inception reduction block. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. 
""" def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, use_bias, use_bn): super(ReductionBlock, self).__init__() assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) with self.init_scope(): self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], stride=2, use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "branch2", InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], stride=2, use_bias=use_bias, use_bn=use_bn)) setattr(self.branches, "branch3", partial( F.max_pooling_2d, ksize=3, stride=2, pad=0, cover_all=True)) def __call__(self, x): x = self.branches(x) return x class BNInception(Chain): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels_list : list of int Number of output channels for the initial unit. mid1_channels_list : list of list of list of int Number of pre-middle channels for each unit. mid2_channels_list : list of list of list of int Number of middle channels for each unit. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels_list, mid1_channels_list, mid2_channels_list, use_bias=True, use_bn=True, in_channels=3, in_size=(224, 224), classes=1000): super(BNInception, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", StemBlock( in_channels=in_channels, out_channels=init_block_channels_list[1], mid_channels=init_block_channels_list[0], use_bias=use_bias, use_bn=use_bn)) in_channels = init_block_channels_list[-1] for i, channels_per_stage in enumerate(channels): mid1_channels_list_i = mid1_channels_list[i] mid2_channels_list_i = mid2_channels_list[i] stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): setattr(stage, "unit{}".format(j + 1), ReductionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], use_bias=use_bias, use_bn=use_bn)) else: avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1) setattr(stage, "unit{}".format(j + 1), InceptionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], avg_pool=avg_pool, use_bias=use_bias, use_bn=use_bn)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_bninception(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create BN-Inception model with specific parameters. 
Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ init_block_channels_list = [64, 192] channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]] mid1_channels_list = [ [[64, 64], [64, 64]], [[128, 64], # 3c [64, 96], # 4a [96, 96], # 4a [128, 128], # 4c [128, 160]], # 4d [[128, 192], # 4e [192, 160], # 5a [192, 192]], ] mid2_channels_list = [ [[64, 64, 96, 32], [64, 96, 96, 64]], [[0, 160, 96, 0], # 3c [224, 96, 128, 128], # 4a [192, 128, 128, 128], # 4b [160, 160, 160, 128], # 4c [96, 192, 192, 128]], # 4d [[0, 192, 256, 0], # 4e [352, 320, 224, 128], # 5a [352, 320, 224, 128]], ] net = BNInception( channels=channels, init_block_channels_list=init_block_channels_list, mid1_channels_list=mid1_channels_list, mid2_channels_list=mid2_channels_list, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def bninception(**kwargs): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_bninception(model_name="bninception", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ bninception, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != bninception or weight_count == 11295240) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
17,722
31.82037
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/zfnet.py
""" ZFNet for ImageNet-1K, implemented in Chainer. Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. """ __all__ = ['zfnet', 'zfnetb'] import os from chainer.serializers import load_npz from .alexnet import AlexNet def get_zfnet(version="a", model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ZFNet model with specific parameters. Parameters: ---------- version : str, default 'a' Version of ZFNet ('a' or 'b'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if version == "a": channels = [[96], [256], [384, 384, 256]] ksizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] pads = [[1], [0], [1, 1, 1]] use_lrn = True elif version == "b": channels = [[96], [256], [512, 1024, 512]] ksizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] pads = [[1], [0], [1, 1, 1]] use_lrn = True else: raise ValueError("Unsupported ZFNet version {}".format(version)) net = AlexNet( channels=channels, ksizes=ksizes, strides=strides, pads=pads, use_lrn=use_lrn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def zfnet(**kwargs): """ ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_zfnet(model_name="zfnet", **kwargs) def zfnetb(**kwargs): """ ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_zfnet(version="b", model_name="zfnetb", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ zfnet, zfnetb, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != zfnet or weight_count == 62357608) assert (model != zfnetb or weight_count == 107627624) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
3,469
27.211382
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/peleenet.py
""" PeleeNet for ImageNet-1K, implemented in Chainer. Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. """ __all__ = ['PeleeNet', 'peleenet'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, Concurrent, SimpleSequential class PeleeBranch1(Chain): """ PeleeNet branch type 1 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Stride of the second convolution. """ def __init__(self, in_channels, out_channels, mid_channels, stride=1): super(PeleeBranch1, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=stride) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class PeleeBranch2(Chain): """ PeleeNet branch type 2 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(PeleeBranch2, self).__init__() with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class StemBlock(Chain): """ PeleeNet stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(StemBlock, self).__init__() mid1_channels = out_channels // 2 mid2_channels = out_channels * 2 with self.init_scope(): self.first_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.branches = Concurrent() with self.branches.init_scope(): setattr(self.branches, "branch1", PeleeBranch1( in_channels=out_channels, out_channels=out_channels, mid_channels=mid1_channels, stride=2)) setattr(self.branches, "branch2", partial( F.max_pooling_2d, ksize=2, stride=2, pad=0, cover_all=False)) self.last_conv = conv1x1_block( in_channels=mid2_channels, out_channels=out_channels) def __call__(self, x): x = self.first_conv(x) x = self.branches(x) x = self.last_conv(x) return x class DenseBlock(Chain): """ PeleeNet dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_size : int Bottleneck width. """ def __init__(self, in_channels, out_channels, bottleneck_size): super(DenseBlock, self).__init__() inc_channels = (out_channels - in_channels) // 2 mid_channels = inc_channels * bottleneck_size with self.init_scope(): self.branch1 = PeleeBranch1( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) self.branch2 = PeleeBranch2( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) def __call__(self, x): x1 = self.branch1(x) x2 = self.branch2(x) x = F.concat((x, x1, x2), axis=1) return x class TransitionBlock(Chain): """ PeleeNet's transition block, like in DensNet, but with ordinary convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(TransitionBlock, self).__init__() with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.pool = partial( F.average_pooling_2d, ksize=2, stride=2, pad=0) def __call__(self, x): x = self.conv(x) x = self.pool(x) return x class PeleeNet(Chain): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck_sizes : list of int Bottleneck sizes for each stage. dropout_rate : float, default 0.5 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck_sizes, dropout_rate=0.5, in_channels=3, in_size=(224, 224), classes=1000): super(PeleeNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", StemBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): bottleneck_size = bottleneck_sizes[i] stage = SimpleSequential() with stage.init_scope(): if i != 0: setattr(stage, "trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=in_channels)) for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), DenseBlock( in_channels=in_channels, out_channels=out_channels, bottleneck_size=bottleneck_size)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", conv1x1_block( in_channels=in_channels, out_channels=in_channels)) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "dropout", partial( F.dropout, ratio=dropout_rate)) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_peleenet(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create PeleeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" init_block_channels = 32 growth_rate = 32 layers = [3, 4, 8, 6] bottleneck_sizes = [1, 2, 4, 4] from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1]])[1:]], layers, [[init_block_channels]])[1:] net = PeleeNet( channels=channels, init_block_channels=init_block_channels, bottleneck_sizes=bottleneck_sizes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def peleenet(**kwargs): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_peleenet(model_name="peleenet", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ peleenet, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != peleenet or weight_count == 2802248) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,514
29.382586
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sharesnet.py
""" ShaResNet for ImageNet-1K, implemented in Chainer. Original paper: 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. """ __all__ = ['ShaResNet', 'sharesnet18', 'sharesnet34', 'sharesnet50', 'sharesnet50b', 'sharesnet101', 'sharesnet101b', 'sharesnet152', 'sharesnet152b'] import os from inspect import isfunction import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import ReLU6, conv1x1_block, conv3x3_block, SimpleSequential from .resnet import ResInitBlock class ShaConvBlock(Chain): """ Shared convolution block with Batch normalization and ReLU/ReLU6 activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. activation : function or str or None, default F.activate Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. shared_conv : Chain, default None Shared convolution layer. 
""" def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate=1, groups=1, use_bias=False, activation=(lambda: F.relu), activate=True, shared_conv=None): super(ShaConvBlock, self).__init__() self.activate = activate with self.init_scope(): if shared_conv is None: self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=(not use_bias), dilate=dilate, groups=groups) else: self.conv = shared_conv self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) if self.activate: assert (activation is not None) if isfunction(activation): self.activ = activation() elif isinstance(activation, str): if activation == "relu": self.activ = F.relu elif activation == "relu6": self.activ = ReLU6() else: raise NotImplementedError() else: self.activ = activation def __call__(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def sha_conv3x3_block(in_channels, out_channels, stride=1, pad=1, dilate=1, groups=1, use_bias=False, activation=(lambda: F.relu), activate=True, shared_conv=None): """ 3x3 version of the shared convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilate : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. activation : function or str or None, default F.activate Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. shared_conv : Chain, default None Shared convolution layer. 
""" return ShaConvBlock( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=pad, dilate=dilate, groups=groups, use_bias=use_bias, activation=activation, activate=activate, shared_conv=shared_conv) class ShaResBlock(Chain): """ Simple ShaResNet block for residual path in ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. shared_conv : Chain, default None Shared convolution layer. """ def __init__(self, in_channels, out_channels, stride, shared_conv=None): super(ShaResBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride) self.conv2 = sha_conv3x3_block( in_channels=out_channels, out_channels=out_channels, activation=None, activate=False, shared_conv=shared_conv) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class ShaResBottleneck(Chain): """ ShaResNet bottleneck block for residual path in ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. shared_conv : Chain, default None Shared convolution layer. 
""" def __init__(self, in_channels, out_channels, stride, conv1_stride=False, bottleneck_factor=4, shared_conv=None): super(ShaResBottleneck, self).__init__() assert (conv1_stride or not ((stride > 1) and (shared_conv is not None))) mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = sha_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), shared_conv=shared_conv) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ShaResUnit(Chain): """ ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. shared_conv : Chain, default None Shared convolution layer. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, shared_conv=None): super(ShaResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if bottleneck: self.body = ShaResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride, shared_conv=shared_conv) else: self.body = ShaResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, shared_conv=shared_conv) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ShaResNet(Chain): """ ShaResNet model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): super(ShaResNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() shared_conv = None with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 unit = ShaResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, shared_conv=shared_conv) if (shared_conv is None) and not (bottleneck and not conv1_stride and stride > 1): shared_conv = unit.body.conv2.conv setattr(stage, "unit{}".format(j + 1), unit) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_sharesnet(blocks, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create ShaResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ShaResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ShaResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sharesnet18(**kwargs): """ ShaResNet-18 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=18, model_name="sharesnet18", **kwargs) def sharesnet34(**kwargs): """ ShaResNet-34 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sharesnet(blocks=34, model_name="sharesnet34", **kwargs) def sharesnet50(**kwargs): """ ShaResNet-50 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=50, model_name="sharesnet50", **kwargs) def sharesnet50b(**kwargs): """ ShaResNet-50b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=50, conv1_stride=False, model_name="sharesnet50b", **kwargs) def sharesnet101(**kwargs): """ ShaResNet-101 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=101, model_name="sharesnet101", **kwargs) def sharesnet101b(**kwargs): """ ShaResNet-101b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sharesnet(blocks=101, conv1_stride=False, model_name="sharesnet101b", **kwargs) def sharesnet152(**kwargs): """ ShaResNet-152 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=152, model_name="sharesnet152", **kwargs) def sharesnet152b(**kwargs): """ ShaResNet-152b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=152, conv1_stride=False, model_name="sharesnet152b", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ sharesnet18, sharesnet34, sharesnet50, sharesnet50b, sharesnet101, sharesnet101b, sharesnet152, sharesnet152b, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) # assert (model != sharesnet18 or weight_count == 8556072) # assert (model != sharesnet34 or weight_count == 13613864) # assert (model != sharesnet50 or weight_count == 17373224) # assert (model != sharesnet50b or weight_count == 20469800) # assert (model != sharesnet101 or weight_count == 26338344) # assert (model != sharesnet101b or weight_count == 29434920) # assert (model != sharesnet152 or weight_count == 33724456) # assert (model != sharesnet152b or weight_count == 36821032) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == 
"__main__": _test()
20,387
32.205212
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/ibppose_coco.py
""" IBPPose for COCO Keypoint, implemented in Chainer. Original paper: 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. """ __all__ = ['IbpPose', 'ibppose_coco'] import os import chainer.functions as F from functools import partial from chainer import Chain from chainer.serializers import load_npz from .common import get_activation_layer, conv1x1_block, conv3x3_block, conv7x7_block, SEBlock, Hourglass,\ SimpleSequential, InterpolationBlock class IbpResBottleneck(Chain): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default F.relu Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, use_bias=False, bottleneck_factor=2, activation=(lambda: F.relu)): super(IbpResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, activation=activation) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, use_bias=use_bias, activation=activation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IbpResUnit(Chain): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. 
use_bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default F.relu Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride=1, use_bias=False, bottleneck_factor=2, activation=(lambda: F.relu)): super(IbpResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = IbpResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, bottleneck_factor=bottleneck_factor, activation=activation) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=use_bias, activation=None) self.activ = get_activation_layer(activation) def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class IbpBackbone(Chain): """ IBPPose backbone. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, activation): super(IbpBackbone, self).__init__() dilations = (3, 3, 4, 4, 5, 5) mid1_channels = out_channels // 4 mid2_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid1_channels, stride=2, activation=activation) self.res1 = IbpResUnit( in_channels=mid1_channels, out_channels=mid2_channels, activation=activation) self.pool = partial( F.max_pooling_2d, ksize=2, stride=2) self.res2 = IbpResUnit( in_channels=mid2_channels, out_channels=mid2_channels, activation=activation) self.dilation_branch = SimpleSequential() with self.dilation_branch.init_scope(): for i, dilation in enumerate(dilations): setattr(self.dilation_branch, "block{}".format(i + 1), conv3x3_block( in_channels=mid2_channels, out_channels=mid2_channels, pad=dilation, dilate=dilation, activation=activation)) def __call__(self, x): x = self.conv1(x) x = self.res1(x) x = self.pool(x) x = self.res2(x) y = self.dilation_branch(x) x = F.concat((x, y), axis=1) return x class IbpDownBlock(Chain): """ IBPPose down block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, activation): super(IbpDownBlock, self).__init__() with self.init_scope(): self.down = partial( F.max_pooling_2d, ksize=2, stride=2) self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation) def __call__(self, x): x = self.down(x) x = self.res(x) return x class IbpUpBlock(Chain): """ IBPPose up block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, use_bn, activation): super(IbpUpBlock, self).__init__() with self.init_scope(): self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation) self.up = InterpolationBlock( scale_factor=2, mode="nearest") self.conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation) def __call__(self, x): x = self.res(x) x = self.up(x) x = self.conv(x) return x class MergeBlock(Chain): """ IBPPose merge block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, use_bn): super(MergeBlock, self).__init__() with self.init_scope(): self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=None) def __call__(self, x): return self.conv(x) class IbpPreBlock(Chain): """ IBPPose preliminary decoder block. Parameters: ---------- out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, out_channels, use_bn, activation): super(IbpPreBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation) self.se = SEBlock( channels=out_channels, use_conv=False, mid_activation=activation) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.se(x) return x class IbpPass(Chain): """ IBPPose single pass decoder block. Parameters: ---------- channels : int Number of input/output channels. 
mid_channels : int Number of middle channels. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, channels, mid_channels, depth, growth_rate, merge, use_bn, activation): super(IbpPass, self).__init__() self.merge = merge with self.init_scope(): down_seq = SimpleSequential() up_seq = SimpleSequential() skip_seq = SimpleSequential() top_channels = channels bottom_channels = channels for i in range(depth + 1): with skip_seq.init_scope(): setattr(skip_seq, "skip{}".format(i + 1), IbpResUnit( in_channels=top_channels, out_channels=top_channels, activation=activation)) bottom_channels += growth_rate if i < depth: with down_seq.init_scope(): setattr(down_seq, "down{}".format(i + 1), IbpDownBlock( in_channels=top_channels, out_channels=bottom_channels, activation=activation)) with up_seq.init_scope(): setattr(up_seq, "up{}".format(i + 1), IbpUpBlock( in_channels=bottom_channels, out_channels=top_channels, use_bn=use_bn, activation=activation)) top_channels = bottom_channels self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq) self.pre_block = IbpPreBlock( out_channels=channels, use_bn=use_bn, activation=activation) self.post_block = conv1x1_block( in_channels=channels, out_channels=mid_channels, use_bias=True, use_bn=False, activation=None) if self.merge: self.pre_merge_block = MergeBlock( in_channels=channels, out_channels=channels, use_bn=use_bn) self.post_merge_block = MergeBlock( in_channels=mid_channels, out_channels=channels, use_bn=use_bn) def __call__(self, x, x_prev): x = self.hg(x) if x_prev is not None: x = x + x_prev y = self.pre_block(x) z = self.post_block(y) if self.merge: z = self.post_merge_block(z) + self.pre_merge_block(y) return z class IbpPose(Chain): """ IBPPose model from 'Simple Pose: Rethinking and Improving a Bottom-up Approach 
for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- passes : int Number of passes. backbone_out_channels : int Number of output channels for the backbone. outs_channels : int Number of output channels for the backbone. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. """ def __init__(self, passes, backbone_out_channels, outs_channels, depth, growth_rate, use_bn, in_channels=3, in_size=(256, 256)): super(IbpPose, self).__init__() self.in_size = in_size activation = partial(F.leaky_relu, slope=0.01) with self.init_scope(): self.backbone = IbpBackbone( in_channels=in_channels, out_channels=backbone_out_channels, activation=activation) self.decoder = SimpleSequential() with self.decoder.init_scope(): for i in range(passes): merge = (i != passes - 1) setattr(self.decoder, "pass{}".format(i + 1), IbpPass( channels=backbone_out_channels, mid_channels=outs_channels, depth=depth, growth_rate=growth_rate, merge=merge, use_bn=use_bn, activation=activation)) def __call__(self, x): x = self.backbone(x) x_prev = None for block_name in self.decoder.layer_names: block = self.decoder[block_name] if x_prev is not None: x = x + x_prev x_prev = block(x, x_prev) return x_prev def get_ibppose(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create IBPPose model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" passes = 4 backbone_out_channels = 256 outs_channels = 50 depth = 4 growth_rate = 128 use_bn = True net = IbpPose( passes=passes, backbone_out_channels=backbone_out_channels, outs_channels=outs_channels, depth=depth, growth_rate=growth_rate, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def ibppose_coco(**kwargs): """ IBPPose model for COCO Keypoint from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_ibppose(model_name="ibppose_coco", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False in_size = (256, 256) pretrained = False models = [ ibppose_coco, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibppose_coco or weight_count == 95827784) batch = 14 x = np.random.rand(batch, 3, in_size[0], in_size[1]).astype(np.float32) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == 50)) assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) if __name__ == "__main__": _test()
18,194
29.943878
117
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/xception.py
""" Xception for ImageNet-1K, implemented in Chainer. Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. """ __all__ = ['Xception', 'xception'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SimpleSequential class DwsConv(Chain): """ Depthwise separable convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. pad : int or tuple/list of 2 int, default 0 Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0): super(DwsConv, self).__init__() with self.init_scope(): self.dw_conv = L.Convolution2D( in_channels=in_channels, out_channels=in_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, groups=in_channels) self.pw_conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=1, nobias=True) def __call__(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class DwsConvBlock(Chain): """ Depthwise separable convolution block with batchnorm and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. pad : int or tuple/list of 2 int Padding value for convolution layer. activate : bool Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, pad, activate): super(DwsConvBlock, self).__init__() self.activate = activate with self.init_scope(): if self.activate: self.activ = F.relu self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, ksize=kernel_size, stride=stride, pad=pad) self.bn = L.BatchNormalization( size=out_channels, eps=1e-5) def __call__(self, x): if self.activate: x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def dws_conv3x3_block(in_channels, out_channels, activate): """ 3x3 version of the depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool Whether activate the convolution block. """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, pad=1, activate=activate) class XceptionUnit(Chain): """ Xception unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the downsample polling. reps : int Number of repetitions. start_with_relu : bool, default True Whether start with ReLU activation. grow_first : bool, default True Whether start from growing. 
""" def __init__(self, in_channels, out_channels, stride, reps, start_with_relu=True, grow_first=True): super(XceptionUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.body = SimpleSequential() with self.body.init_scope(): for i in range(reps): if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)): in_channels_i = in_channels out_channels_i = out_channels else: if grow_first: in_channels_i = out_channels out_channels_i = out_channels else: in_channels_i = in_channels out_channels_i = in_channels activate = start_with_relu if (i == 0) else True setattr(self.body, "block{}".format(i + 1), dws_conv3x3_block( in_channels=in_channels_i, out_channels=out_channels_i, activate=activate)) if stride != 1: setattr(self.body, "pool", partial( F.max_pooling_2d, ksize=3, stride=stride, pad=1, cover_all=False)) def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = F.identity(x) x = self.body(x) x = x + identity return x class XceptionInitBlock(Chain): """ Xception specific initial block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(XceptionInitBlock, self).__init__() with self.init_scope(): self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, pad=0) self.conv2 = conv3x3_block( in_channels=32, out_channels=64, stride=1, pad=0) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) return x class XceptionFinalBlock(Chain): """ Xception specific final block. 
""" def __init__(self): super(XceptionFinalBlock, self).__init__() with self.init_scope(): self.conv1 = dws_conv3x3_block( in_channels=1024, out_channels=1536, activate=False) self.conv2 = dws_conv3x3_block( in_channels=1536, out_channels=2048, activate=True) self.activ = F.relu self.pool = partial( F.average_pooling_2d, ksize=10, stride=1) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.activ(x) x = self.pool(x) return x class Xception(Chain): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- channels : list of list of int Number of output channels for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, in_channels=3, in_size=(299, 299), classes=1000, **kwargs): super(Xception, self).__init__(**kwargs) self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", XceptionInitBlock( in_channels=in_channels)) in_channels = 64 for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): setattr(stage, "unit{}".format(j + 1), XceptionUnit( in_channels=in_channels, out_channels=out_channels, stride=(2 if (j == 0) else 1), reps=(2 if (j == 0) else 3), start_with_relu=((i != 0) or (j != 0)), grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1)))) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", XceptionFinalBlock()) in_channels = 2048 self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, 
in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_xception(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create Xception model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ channels = [[128], [256], [728] * 9, [1024]] net = Xception( channels=channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def xception(**kwargs): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_xception(model_name="xception", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ xception, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != xception or weight_count == 22855952) x = np.zeros((1, 3, 299, 299), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,358
29.291667
118
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/darknet53.py
""" DarkNet-53 for ImageNet-1K, implemented in Chainer. Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. """ __all__ = ['DarkNet53', 'darknet53'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1_block, conv3x3_block, SimpleSequential class DarkUnit(Chain): """ DarkNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. alpha : float Slope coefficient for Leaky ReLU activation. """ def __init__(self, in_channels, out_channels, alpha): super(DarkUnit, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=partial( F.leaky_relu, slope=alpha)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, activation=partial( F.leaky_relu, slope=alpha)) def __call__(self, x): identity = x x = self.conv1(x) x = self.conv2(x) return x + identity class DarkNet53(Chain): """ DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. alpha : float, default 0.1 Slope coefficient for Leaky ReLU activation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, alpha=0.1, in_channels=3, in_size=(224, 224), classes=1000): super(DarkNet53, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, activation=partial( F.leaky_relu, slope=alpha))) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): if j == 0: setattr(stage, "unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, activation=partial( F.leaky_relu, slope=alpha))) else: setattr(stage, "unit{}".format(j + 1), DarkUnit( in_channels=in_channels, out_channels=out_channels, alpha=alpha)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_darknet53(model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DarkNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" init_block_channels = 32 layers = [2, 3, 9, 9, 5] channels_per_layers = [64, 128, 256, 512, 1024] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = DarkNet53( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def darknet53(**kwargs): """ DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_darknet53(model_name="darknet53", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ darknet53, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != darknet53 or weight_count == 41609928) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
6,985
31.193548
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/mobilenet.py
""" MobileNet for ImageNet-1K, implemented in Chainer. Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. """ __all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'get_mobilenet'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv3x3_block, dwsconv3x3_block, SimpleSequential class MobileNet(Chain): """ MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- channels : list of list of int Number of output channels for each unit. first_stage_stride : bool Whether stride is used at the first stage. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). dw_activation : function or str or None, default F.relu Activation function after the depthwise convolution block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, first_stage_stride, dw_use_bn=True, dw_activation=(lambda: F.relu), in_channels=3, in_size=(224, 224), classes=1000): super(MobileNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): init_block_channels = channels[0][0] setattr(self.features, "init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels[1:]): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1 setattr(stage, "unit{}".format(j + 1), dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, dw_use_bn=dw_use_bn, dw_activation=dw_activation)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_mobilenet(width_scale, dws_simplified=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create MobileNet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. dws_simplified : bool, default False Whether to use simplified depthwise separable convolution block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.chainer/models' Location for keeping the model parameters. """ channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]] first_stage_stride = False if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if dws_simplified: dw_use_bn = False dw_activation = None else: dw_use_bn = True dw_activation = (lambda: F.relu) net = MobileNet( channels=channels, first_stage_stride=first_stage_stride, dw_use_bn=dw_use_bn, dw_activation=dw_activation, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def mobilenet_w1(**kwargs): """ 1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=1.0, model_name="mobilenet_w1", **kwargs) def mobilenet_w3d4(**kwargs): """ 0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.75, model_name="mobilenet_w3d4", **kwargs) def mobilenet_wd2(**kwargs): """ 0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.5, model_name="mobilenet_wd2", **kwargs) def mobilenet_wd4(**kwargs): """ 0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.25, model_name="mobilenet_wd4", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ mobilenet_w1, mobilenet_w3d4, mobilenet_wd2, mobilenet_wd4, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenet_w1 or weight_count == 4231976) assert (model != mobilenet_w3d4 or weight_count == 2585560) assert (model != mobilenet_wd2 or weight_count == 1331592) assert (model != mobilenet_wd4 or weight_count == 470072) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
8,305
33.322314
119
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/dpn.py
""" DPN for ImageNet-1K, implemented in Chainer. Original paper: 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. """ __all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn98', 'dpn107', 'dpn131'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, SimpleSequential, DualPathSequential class GlobalAvgPool2D(Chain): """ Global average pooling operation for spatial data. """ def __init__(self): super(GlobalAvgPool2D, self).__init__() def __call__(self, x): batch, channels, height, width = x.shape x = F.average_pooling_2d(x, ksize=(height, width)) return x class GlobalAvgMaxPool2D(Chain): """ Global average+max pooling operation for spatial data. """ def __init__(self): super(GlobalAvgMaxPool2D, self).__init__() def __call__(self, x): batch, channels, height, width = x.shape x_avg = F.average_pooling_2d(x, ksize=(height, width)) x_max = F.max_pooling_2d(x, ksize=(height, width), cover_all=False) x = 0.5 * (x_avg + x_max) return x def dpn_batch_norm(channels): """ DPN specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. """ return L.BatchNormalization( size=channels, eps=0.001) class PreActivation(Chain): """ DPN specific block, which performs the preactivation like in RreResNet. Parameters: ---------- channels : int Number of channels. """ def __init__(self, channels): super(PreActivation, self).__init__() with self.init_scope(): self.bn = dpn_batch_norm(channels=channels) self.activ = F.relu def __call__(self, x): x = self.bn(x) x = self.activ(x) return x class DPNConv(Chain): """ DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Stride of the convolution. 
pad : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, ksize, stride, pad, groups): super(DPNConv, self).__init__() with self.init_scope(): self.bn = dpn_batch_norm(channels=in_channels) self.activ = F.relu self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=stride, pad=pad, nobias=True, groups=groups) def __call__(self, x): x = self.bn(x) x = self.activ(x) x = self.conv(x) return x def dpn_conv1x1(in_channels, out_channels, stride=1): """ 1x1 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Stride of the convolution. """ return DPNConv( in_channels=in_channels, out_channels=out_channels, ksize=1, stride=stride, pad=0, groups=1) def dpn_conv3x3(in_channels, out_channels, stride, groups): """ 3x3 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. groups : int Number of groups. """ return DPNConv( in_channels=in_channels, out_channels=out_channels, ksize=3, stride=stride, pad=1, groups=groups) class DPNUnit(Chain): """ DPN unit. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of intermediate channels. bw : int Number of residual channels. inc : int Incrementing step for channels. groups : int Number of groups in the units. has_proj : bool Whether to use projection. key_strides : int Key strides of the convolutions. b_case : bool, default False Whether to use B-case model. 
""" def __init__(self, in_channels, mid_channels, bw, inc, groups, has_proj, key_strides, b_case=False): super(DPNUnit, self).__init__() self.bw = bw self.has_proj = has_proj self.b_case = b_case with self.init_scope(): if self.has_proj: self.conv_proj = dpn_conv1x1( in_channels=in_channels, out_channels=bw + 2 * inc, stride=key_strides) self.conv1 = dpn_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.conv2 = dpn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=key_strides, groups=groups) if b_case: self.preactiv = PreActivation(channels=mid_channels) self.conv3a = conv1x1( in_channels=mid_channels, out_channels=bw) self.conv3b = conv1x1( in_channels=mid_channels, out_channels=inc) else: self.conv3 = dpn_conv1x1( in_channels=mid_channels, out_channels=bw + inc) def __call__(self, x1, x2=None): x_in = F.concat((x1, x2), axis=1) if x2 is not None else x1 if self.has_proj: x_s = self.conv_proj(x_in) x_s1 = x_s[:, :self.bw, :, :] x_s2 = x_s[:, self.bw:, :, :] else: assert (x2 is not None) x_s1 = x1 x_s2 = x2 x_in = self.conv1(x_in) x_in = self.conv2(x_in) if self.b_case: x_in = self.preactiv(x_in) y1 = self.conv3a(x_in) y2 = self.conv3b(x_in) else: x_in = self.conv3(x_in) y1 = x_in[:, :self.bw, :, :] y2 = x_in[:, self.bw:, :, :] residual = x_s1 + y1 dense = F.concat((x_s2, y2), axis=1) return residual, dense class DPNInitBlock(Chain): """ DPN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. ksize : int or tuple/list of 2 int Convolution window size. pad : int or tuple/list of 2 int Padding value for convolution layer. 
""" def __init__(self, in_channels, out_channels, ksize, pad): super(DPNInitBlock, self).__init__() with self.init_scope(): self.conv = L.Convolution2D( in_channels=in_channels, out_channels=out_channels, ksize=ksize, stride=2, pad=pad, nobias=True) self.bn = dpn_batch_norm(channels=out_channels) self.activ = F.relu self.pool = partial( F.max_pooling_2d, ksize=3, stride=2, pad=1, cover_all=False) def __call__(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class DPNFinalBlock(Chain): """ DPN final block, which performs the preactivation with cutting. Parameters: ---------- channels : int Number of channels. """ def __init__(self, channels): super(DPNFinalBlock, self).__init__() with self.init_scope(): self.activ = PreActivation(channels=channels) def __call__(self, x1, x2): assert (x2 is not None) x = F.concat((x1, x2), axis=1) x = self.activ(x) return x, None class DPN(Chain): """ DPN model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_block_kernel_size : int or tuple/list of 2 int Convolution window size for the initial unit. init_block_padding : int or tuple/list of 2 int Padding value for convolution layer in the initial unit. rs : list f int Number of intermediate channels for each unit. bws : list f int Number of residual channels for each unit. incs : list f int Incrementing step for channels for each unit. groups : int Number of groups in the units. b_case : bool Whether to use B-case model. for_training : bool Whether to use model for training. test_time_pool : bool Whether to use the avg-max pooling in the inference mode. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, init_block_kernel_size, init_block_padding, rs, bws, incs, groups, b_case, for_training, test_time_pool, in_channels=3, in_size=(224, 224), classes=1000): super(DPN, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) with self.features.init_scope(): setattr(self.features, "init_block", DPNInitBlock( in_channels=in_channels, out_channels=init_block_channels, ksize=init_block_kernel_size, pad=init_block_padding)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() r = rs[i] bw = bws[i] inc = incs[i] with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): has_proj = (j == 0) key_strides = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), DPNUnit( in_channels=in_channels, mid_channels=r, bw=bw, inc=inc, groups=groups, has_proj=has_proj, key_strides=key_strides, b_case=b_case)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_block", DPNFinalBlock(channels=in_channels)) self.output = SimpleSequential() with self.output.init_scope(): if for_training or not test_time_pool: setattr(self.output, "final_pool", GlobalAvgPool2D()) setattr(self.output, "final_conv", conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True)) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) else: setattr(self.output, "avg_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) setattr(self.output, "final_conv", conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True)) setattr(self.output, "avgmax_pool", GlobalAvgMaxPool2D()) setattr(self.output, "final_flatten", partial( F.reshape, shape=(-1, classes))) def __call__(self, x): x = self.features(x) x = self.output(x) return x def 
get_dpn(num_layers, b_case=False, for_training=False, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create DPN model with specific parameters. Parameters: ---------- num_layers : int Number of layers. b_case : bool, default False Whether to use B-case model. for_training : bool Whether to use model for training. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ if num_layers == 68: init_block_channels = 10 init_block_kernel_size = 3 init_block_padding = 1 bw_factor = 1 k_r = 128 groups = 32 k_sec = (3, 4, 12, 3) incs = (16, 32, 32, 64) test_time_pool = True elif num_layers == 98: init_block_channels = 96 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (3, 6, 20, 3) incs = (16, 32, 32, 128) test_time_pool = True elif num_layers == 107: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 200 groups = 50 k_sec = (4, 8, 20, 3) incs = (20, 64, 64, 128) test_time_pool = True elif num_layers == 131: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (4, 8, 28, 3) incs = (16, 32, 32, 128) test_time_pool = True else: raise ValueError("Unsupported DPN version with number of layers {}".format(num_layers)) channels = [[0] * li for li in k_sec] rs = [0 * li for li in k_sec] bws = [0 * li for li in k_sec] for i in range(len(k_sec)): rs[i] = (2 ** i) * k_r bws[i] = (2 ** i) * 64 * bw_factor inc = incs[i] channels[i][0] = bws[i] + 3 * inc for j in range(1, k_sec[i]): channels[i][j] = channels[i][j - 1] + inc net = DPN( channels=channels, init_block_channels=init_block_channels, init_block_kernel_size=init_block_kernel_size, init_block_padding=init_block_padding, rs=rs, bws=bws, 
incs=incs, groups=groups, b_case=b_case, for_training=for_training, test_time_pool=test_time_pool, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def dpn68(**kwargs): """ DPN-68 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=False, model_name="dpn68", **kwargs) def dpn68b(**kwargs): """ DPN-68b model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=True, model_name="dpn68b", **kwargs) def dpn98(**kwargs): """ DPN-98 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dpn(num_layers=98, b_case=False, model_name="dpn98", **kwargs) def dpn107(**kwargs): """ DPN-107 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dpn(num_layers=107, b_case=False, model_name="dpn107", **kwargs) def dpn131(**kwargs): """ DPN-131 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_dpn(num_layers=131, b_case=False, model_name="dpn131", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = True for_training = False models = [ dpn68, # dpn68b, dpn98, # dpn107, dpn131, ] for model in models: net = model(pretrained=pretrained, for_training=for_training) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != dpn68 or weight_count == 12611602) assert (model != dpn68b or weight_count == 12611602) assert (model != dpn98 or weight_count == 61570728) assert (model != dpn107 or weight_count == 86917800) assert (model != dpn131 or weight_count == 79254504) x = np.zeros((1, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
19,846
28.711078
115
py
imgclsmob
imgclsmob-master/chainer_/chainercv2/models/sknet.py
""" SKNet for ImageNet-1K, implemented in Chainer. Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. """ __all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152'] import os import chainer.functions as F import chainer.links as L from chainer import Chain from functools import partial from chainer.serializers import load_npz from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, SimpleSequential from .resnet import ResInitBlock class SKConvBlock(Chain): """ SKNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. groups : int, default 32 Number of groups in branches. num_branches : int, default 2 Number of branches (`M` parameter in the paper). reduction : int, default 16 Reduction value for intermediate channels (`r` parameter in the paper). min_channels : int, default 32 Minimal number of intermediate channels (`L` parameter in the paper). 
""" def __init__(self, in_channels, out_channels, stride, groups=32, num_branches=2, reduction=16, min_channels=32): super(SKConvBlock, self).__init__() self.num_branches = num_branches self.out_channels = out_channels mid_channels = max(in_channels // reduction, min_channels) with self.init_scope(): self.branches = Concurrent(stack=True) with self.branches.init_scope(): for i in range(num_branches): dilate = 1 + i setattr(self.branches, "branch{}".format(i + 2), conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, pad=dilate, dilate=dilate, groups=groups)) self.fc1 = conv1x1_block( in_channels=out_channels, out_channels=mid_channels) self.fc2 = conv1x1( in_channels=mid_channels, out_channels=(out_channels * num_branches)) self.softmax = partial( F.softmax, axis=1) def __call__(self, x): y = self.branches(x) u = F.sum(y, axis=1) s = F.average_pooling_2d(u, ksize=u.shape[2:]) z = self.fc1(s) w = self.fc2(z) batch = w.shape[0] w = F.reshape(w, shape=(batch, self.num_branches, self.out_channels)) w = self.softmax(w) w = F.expand_dims(F.expand_dims(w, axis=3), axis=4) y = y * w y = F.sum(y, axis=1) return y class SKNetBottleneck(Chain): """ SKNet bottleneck block for residual path in SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. bottleneck_factor : int, default 2 Bottleneck factor. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck_factor=2): super(SKNetBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor with self.init_scope(): self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = SKConvBlock( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class SKNetUnit(Chain): """ SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Stride of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(SKNetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) with self.init_scope(): self.body = SKNetBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = F.relu def __call__(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class SKNet(Chain): """ SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000): super(SKNet, self).__init__() self.in_size = in_size self.classes = classes with self.init_scope(): self.features = SimpleSequential() with self.features.init_scope(): setattr(self.features, "init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential() with stage.init_scope(): for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 setattr(stage, "unit{}".format(j + 1), SKNetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels setattr(self.features, "stage{}".format(i + 1), stage) setattr(self.features, "final_pool", partial( F.average_pooling_2d, ksize=7, stride=1)) self.output = SimpleSequential() with self.output.init_scope(): setattr(self.output, "flatten", partial( F.reshape, shape=(-1, in_channels))) setattr(self.output, "fc", L.Linear( in_size=in_channels, out_size=classes)) def __call__(self, x): x = self.features(x) x = self.output(x) return x def get_sknet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".chainer", "models"), **kwargs): """ Create SKNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SKNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file load_npz( file=get_model_file( model_name=model_name, local_model_store_dir_path=root), obj=net) return net def sknet50(**kwargs): """ SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sknet(blocks=50, model_name="sknet50", **kwargs) def sknet101(**kwargs): """ SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ return get_sknet(blocks=101, model_name="sknet101", **kwargs) def sknet152(**kwargs): """ SKNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" return get_sknet(blocks=152, model_name="sknet152", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False models = [ sknet50, sknet101, sknet152, ] for model in models: net = model(pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != sknet50 or weight_count == 27479784) assert (model != sknet101 or weight_count == 48736040) assert (model != sknet152 or weight_count == 66295656) x = np.zeros((14, 3, 224, 224), np.float32) y = net(x) assert (y.shape == (14, 1000)) if __name__ == "__main__": _test()
11,401
29.983696
115
py