Dataset schema:
repo : string (length 1 to 99)
file : string (length 13 to 215)
code : string (length 12 to 59.2M)
file_length : int64 (12 to 59.2M)
avg_line_length : float64 (3.82 to 1.48M)
max_line_length : int64 (12 to 2.51M)
extension_type : string (1 distinct value)
repo: imgclsmob
file: imgclsmob-master/pytorch/pytorchcv/models/fractalnet_cifar.py
""" FractalNet for CIFAR, implemented in PyTorch. Original paper: 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648. """ __all__ = ['CIFARFractalNet', 'fractalnet_cifar10', 'fractalnet_cifar100'] import os import numpy as np import torch import torch.nn as nn import torch.nn.init as init from .common import ParametricSequential class DropConvBlock(nn.Module): """ Convolution block with Batch normalization, ReLU activation, and Dropout layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False, dropout_prob=0.0): super(DropConvBlock, self).__init__() self.use_dropout = (dropout_prob != 0.0) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) self.bn = nn.BatchNorm2d(num_features=out_channels) self.activ = nn.ReLU(inplace=True) if self.use_dropout: self.dropout = nn.Dropout2d(p=dropout_prob) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) if self.use_dropout: x = self.dropout(x) return x def drop_conv3x3_block(in_channels, out_channels, stride=1, padding=1, bias=False, dropout_prob=0.0): """ 3x3 version of the convolution block with dropout. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. """ return DropConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, bias=bias, dropout_prob=dropout_prob) class FractalBlock(nn.Module): """ FractalNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_columns : int Number of columns in each block. loc_drop_prob : float Local drop path probability. dropout_prob : float Probability of dropout. """ def __init__(self, in_channels, out_channels, num_columns, loc_drop_prob, dropout_prob): super(FractalBlock, self).__init__() assert (num_columns >= 1) self.num_columns = num_columns self.loc_drop_prob = loc_drop_prob self.blocks = nn.Sequential() depth = 2 ** (num_columns - 1) for i in range(depth): level_block_i = nn.Sequential() for j in range(self.num_columns): column_step_j = 2 ** j if (i + 1) % column_step_j == 0: in_channels_ij = in_channels if (i + 1 == column_step_j) else out_channels level_block_i.add_module("subblock{}".format(j + 1), drop_conv3x3_block( in_channels=in_channels_ij, out_channels=out_channels, dropout_prob=dropout_prob)) self.blocks.add_module("block{}".format(i + 1), level_block_i) @staticmethod def calc_drop_mask(batch_size, glob_num_columns, curr_num_columns, max_num_columns, loc_drop_prob): """ Calculate drop path mask. 
Parameters: ---------- batch_size : int Size of batch. glob_num_columns : int Number of columns in global drop path mask. curr_num_columns : int Number of active columns in the current level of block. max_num_columns : int Number of columns for all network. loc_drop_prob : float Local drop path probability. Returns: ------- Tensor Resulted mask. """ glob_batch_size = glob_num_columns.shape[0] glob_drop_mask = np.zeros((curr_num_columns, glob_batch_size), dtype=np.float32) glob_drop_num_columns = glob_num_columns - (max_num_columns - curr_num_columns) glob_drop_indices = np.where(glob_drop_num_columns >= 0)[0] glob_drop_mask[glob_drop_num_columns[glob_drop_indices], glob_drop_indices] = 1.0 loc_batch_size = batch_size - glob_batch_size loc_drop_mask = np.random.binomial( n=1, p=(1.0 - loc_drop_prob), size=(curr_num_columns, loc_batch_size)).astype(np.float32) alive_count = loc_drop_mask.sum(axis=0) dead_indices = np.where(alive_count == 0.0)[0] loc_drop_mask[np.random.randint(0, curr_num_columns, size=dead_indices.shape), dead_indices] = 1.0 drop_mask = np.concatenate((glob_drop_mask, loc_drop_mask), axis=1) return torch.from_numpy(drop_mask) @staticmethod def join_outs(raw_outs, glob_num_columns, num_columns, loc_drop_prob, training): """ Join outputs for current level of block. Parameters: ---------- raw_outs : list of Tensor Current outputs from active columns. glob_num_columns : int Number of columns in global drop path mask. num_columns : int Number of columns for all network. loc_drop_prob : float Local drop path probability. training : bool Whether training mode for network. Returns: ------- Tensor Joined output. """ curr_num_columns = len(raw_outs) out = torch.stack(raw_outs, dim=0) assert (out.size(0) == curr_num_columns) if training: batch_size = out.size(1) batch_mask = FractalBlock.calc_drop_mask( batch_size=batch_size, glob_num_columns=glob_num_columns, curr_num_columns=curr_num_columns, max_num_columns=num_columns, loc_drop_prob=loc_drop_prob) batch_mask = batch_mask.to(out.device) assert (batch_mask.size(0) == curr_num_columns) assert (batch_mask.size(1) == batch_size) batch_mask = batch_mask.unsqueeze(2).unsqueeze(3).unsqueeze(4) masked_out = out * batch_mask num_alive = batch_mask.sum(dim=0) num_alive[num_alive == 0.0] = 1.0 out = masked_out.sum(dim=0) / num_alive else: out = out.mean(dim=0) return out def forward(self, x, glob_num_columns): outs = [x] * self.num_columns for level_block_i in self.blocks._modules.values(): outs_i = [] for j, block_ij in enumerate(level_block_i._modules.values()): input_i = outs[j] outs_i.append(block_ij(input_i)) joined_out = FractalBlock.join_outs( raw_outs=outs_i[::-1], glob_num_columns=glob_num_columns, num_columns=self.num_columns, loc_drop_prob=self.loc_drop_prob, training=self.training) len_level_block_i = len(level_block_i._modules.values()) for j in range(len_level_block_i): outs[j] = joined_out return outs[0] class FractalUnit(nn.Module): """ FractalNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_columns : int Number of columns in each block. loc_drop_prob : float Local drop path probability. dropout_prob : float Probability of dropout. 
""" def __init__(self, in_channels, out_channels, num_columns, loc_drop_prob, dropout_prob): super(FractalUnit, self).__init__() self.block = FractalBlock( in_channels=in_channels, out_channels=out_channels, num_columns=num_columns, loc_drop_prob=loc_drop_prob, dropout_prob=dropout_prob) self.pool = nn.MaxPool2d( kernel_size=2, stride=2) def forward(self, x, glob_num_columns): x = self.block(x, glob_num_columns=glob_num_columns) x = self.pool(x) return x class CIFARFractalNet(nn.Module): """ FractalNet model for CIFAR from 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648. Parameters: ---------- channels : list of int Number of output channels for each unit. num_columns : int Number of columns in each block. dropout_probs : list of float Probability of dropout in each block. loc_drop_prob : float Local drop path probability. glob_drop_ratio : float Global drop part fraction. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, num_columns, dropout_probs, loc_drop_prob, glob_drop_ratio, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARFractalNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.glob_drop_ratio = glob_drop_ratio self.num_columns = num_columns self.features = ParametricSequential() for i, out_channels in enumerate(channels): dropout_prob = dropout_probs[i] self.features.add_module("unit{}".format(i + 1), FractalUnit( in_channels=in_channels, out_channels=out_channels, num_columns=num_columns, loc_drop_prob=loc_drop_prob, dropout_prob=dropout_prob)) in_channels = out_channels self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): glob_batch_size = int(x.size(0) * self.glob_drop_ratio) glob_num_columns = np.random.randint(0, self.num_columns, size=(glob_batch_size,)) x = self.features(x, glob_num_columns=glob_num_columns) x = x.view(x.size(0), -1) x = self.output(x) return x def get_fractalnet_cifar(num_classes, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create WRN model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" dropout_probs = (0.0, 0.1, 0.2, 0.3, 0.4) channels = [64 * (2 ** (i if i != len(dropout_probs) - 1 else i - 1)) for i in range(len(dropout_probs))] num_columns = 3 loc_drop_prob = 0.15 glob_drop_ratio = 0.5 net = CIFARFractalNet( channels=channels, num_columns=num_columns, dropout_probs=dropout_probs, loc_drop_prob=loc_drop_prob, glob_drop_ratio=glob_drop_ratio, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fractalnet_cifar10(num_classes=10, **kwargs): """ FractalNet model for CIFAR-10 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fractalnet_cifar(num_classes=num_classes, model_name="fractalnet_cifar10", **kwargs) def fractalnet_cifar100(num_classes=100, **kwargs): """ FractalNet model for CIFAR-100 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fractalnet_cifar(num_classes=num_classes, model_name="fractalnet_cifar100", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (fractalnet_cifar10, 10), (fractalnet_cifar100, 100), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fractalnet_cifar10 or weight_count == 33724618) assert (model != fractalnet_cifar100 or weight_count == 33770788) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
file_length: 15,954 | avg_line_length: 31.038153 | max_line_length: 115 | extension_type: py
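For reference, a minimal sketch of how the drop-path mask above behaves in isolation. It calls FractalBlock.calc_drop_mask directly with hypothetical sizes (a batch of 8 with a 0.5 global-drop ratio, 3 columns total, 2 active at the join level); the import path assumes the pytorchcv package layout used by this repo.

import numpy as np
import torch
from pytorchcv.models.fractalnet_cifar import FractalBlock

# Hypothetical split: the first 4 of 8 samples use the global drop-path mask,
# each keeping a single randomly chosen column network-wide.
batch_size = 8
glob_num_columns = np.random.randint(0, 3, size=(4,))

mask = FractalBlock.calc_drop_mask(
    batch_size=batch_size,
    glob_num_columns=glob_num_columns,
    curr_num_columns=2,   # columns active at this join level
    max_num_columns=3,    # columns in the whole block
    loc_drop_prob=0.15)
print(mask.shape)  # torch.Size([2, 8])
# Local samples always keep at least one alive column; a global sample keeps
# exactly its sampled column (or none, if that column is inactive at this level).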
repo: imgclsmob
file: imgclsmob-master/pytorch/pytorchcv/models/mobilenetv3.py
""" MobileNetV3 for ImageNet-1K, implemented in PyTorch. Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ __all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4', 'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2', 'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4'] import os import torch.nn as nn import torch.nn.init as init from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\ HSwish class MobileNetV3Unit(nn.Module): """ MobileNetV3 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. activation : str Activation function or name of activation function. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, exp_channels, stride, use_kernel3, activation, use_se): super(MobileNetV3Unit, self).__init__() assert (exp_channels >= out_channels) self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = use_se self.use_exp_conv = exp_channels != out_channels mid_channels = exp_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, round_mid=True, out_activation="hsigmoid") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class MobileNetV3FinalBlock(nn.Module): """ MobileNetV3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, use_se): super(MobileNetV3FinalBlock, self).__init__() self.use_se = use_se self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation="hswish") if self.use_se: self.se = SEBlock( channels=out_channels, reduction=4, round_mid=True, out_activation="hsigmoid") def forward(self, x): x = self.conv(x) if self.use_se: x = self.se(x) return x class MobileNetV3Classifier(nn.Module): """ MobileNetV3 classifier. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. 
""" def __init__(self, in_channels, out_channels, mid_channels, dropout_rate): super(MobileNetV3Classifier, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.activ = HSwish(inplace=True) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.activ(x) if self.use_dropout: x = self.dropout(x) x = self.conv2(x) return x class MobileNetV3(nn.Module): """ MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- channels : list of list of int Number of output channels for each unit. exp_channels : list of list of int Number of middle (expanded) channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. use_relu : list of list of int/bool Using ReLU activation flag for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. final_use_se : bool Whether to use SE-module in the final block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), num_classes=1000): super(MobileNetV3, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation="hswish")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): exp_channels_ij = exp_channels[i][j] stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 activation = "relu" if use_relu[i][j] == 1 else "hswish" use_se_flag = use_se[i][j] == 1 stage.add_module("unit{}".format(j + 1), MobileNetV3Unit( in_channels=in_channels, out_channels=out_channels, exp_channels=exp_channels_ij, use_kernel3=use_kernel3, stride=stride, activation=activation, use_se=use_se_flag)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", MobileNetV3FinalBlock( in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = MobileNetV3Classifier( in_channels=in_channels, out_channels=num_classes, mid_channels=classifier_mid_channels, dropout_rate=0.2) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), 
-1) return x def get_mobilenetv3(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MobileNetV3 model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('small' or 'large'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "small": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]] exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]] first_stride = True final_block_channels = 576 elif version == "large": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]] kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]] first_stride = False final_block_channels = 960 else: raise ValueError("Unsupported MobileNetV3 version {}".format(version)) final_use_se = False classifier_mid_channels = 1280 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels] init_block_channels = round_channels(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale) net = MobileNetV3( channels=channels, exp_channels=exp_channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, use_relu=use_relu, use_se=use_se, first_stride=first_stride, final_use_se=final_use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mobilenetv3_small_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_small_wd2(**kwargs): """ MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs) def mobilenetv3_small_w3d4(**kwargs): """ MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs) def mobilenetv3_small_w1(**kwargs): """ MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs) def mobilenetv3_small_w5d4(**kwargs): """ MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs) def mobilenetv3_large_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_large_wd2(**kwargs): """ MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs) def mobilenetv3_large_w3d4(**kwargs): """ MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs) def mobilenetv3_large_w1(**kwargs): """ MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs) def mobilenetv3_large_w5d4(**kwargs): """ MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mobilenetv3_small_w7d20, mobilenetv3_small_wd2, mobilenetv3_small_w3d4, mobilenetv3_small_w1, mobilenetv3_small_w5d4, mobilenetv3_large_w7d20, mobilenetv3_large_wd2, mobilenetv3_large_w3d4, mobilenetv3_large_w1, mobilenetv3_large_w5d4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv3_small_w7d20 or weight_count == 2159600) assert (model != mobilenetv3_small_wd2 or weight_count == 2288976) assert (model != mobilenetv3_small_w3d4 or weight_count == 2581312) assert (model != mobilenetv3_small_w1 or weight_count == 2945288) assert (model != mobilenetv3_small_w5d4 or weight_count == 3643632) assert (model != mobilenetv3_large_w7d20 or weight_count == 2943080) assert (model != mobilenetv3_large_wd2 or weight_count == 3334896) assert (model != mobilenetv3_large_w3d4 or weight_count == 4263496) assert (model != mobilenetv3_large_w1 or weight_count == 5481752) assert (model != mobilenetv3_large_w5d4 or weight_count == 7459144) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
file_length: 18,999 | avg_line_length: 33.234234 | max_line_length: 118 | extension_type: py
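A short usage sketch for the factory functions above, assuming the pytorchcv package is installed; "mobilenetv3_large_w1" follows the registry naming used in this repo.

import torch
from pytorchcv.model_provider import get_model as ptcv_get_model

# Build MobileNetV3 Large 1.0x; pretrained=True would instead download weights.
net = ptcv_get_model("mobilenetv3_large_w1", pretrained=False)
net.eval()

# The fixed 7x7 final AvgPool2d means the model expects 224x224 inputs.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 1000])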
repo: imgclsmob
file: imgclsmob-master/pytorch/pytorchcv/models/diaresnet.py
""" DIA-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['DIAResNet', 'diaresnet10', 'diaresnet12', 'diaresnet14', 'diaresnetbc14b', 'diaresnet16', 'diaresnet18', 'diaresnet26', 'diaresnetbc26b', 'diaresnet34', 'diaresnetbc38b', 'diaresnet50', 'diaresnet50b', 'diaresnet101', 'diaresnet101b', 'diaresnet152', 'diaresnet152b', 'diaresnet200', 'diaresnet200b', 'DIAAttention', 'DIAResUnit'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, DualPathSequential from .resnet import ResBlock, ResBottleneck, ResInitBlock class FirstLSTMAmp(nn.Module): """ First LSTM amplifier branch. Parameters: ---------- in_features : int Number of input channels. out_features : int Number of output channels. """ def __init__(self, in_features, out_features): super(FirstLSTMAmp, self).__init__() mid_features = in_features // 4 self.fc1 = nn.Linear( in_features=in_features, out_features=mid_features) self.activ = nn.ReLU(inplace=True) self.fc2 = nn.Linear( in_features=mid_features, out_features=out_features) def forward(self, x): x = self.fc1(x) x = self.activ(x) x = self.fc2(x) return x class DIALSTMCell(nn.Module): """ DIA-LSTM cell. Parameters: ---------- in_x_features : int Number of x input channels. in_h_features : int Number of h input channels. num_layers : int Number of amplifiers. dropout_rate : float, default 0.1 Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_x_features, in_h_features, num_layers, dropout_rate=0.1): super(DIALSTMCell, self).__init__() self.num_layers = num_layers out_features = 4 * in_h_features self.x_amps = nn.Sequential() self.h_amps = nn.Sequential() for i in range(num_layers): amp_class = FirstLSTMAmp if i == 0 else nn.Linear self.x_amps.add_module("amp{}".format(i + 1), amp_class( in_features=in_x_features, out_features=out_features)) self.h_amps.add_module("amp{}".format(i + 1), amp_class( in_features=in_h_features, out_features=out_features)) in_x_features = in_h_features self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x, h, c): hy = [] cy = [] for i in range(self.num_layers): hx_i = h[i] cx_i = c[i] gates = self.x_amps[i](x) + self.h_amps[i](hx_i) i_gate, f_gate, c_gate, o_gate = gates.chunk(chunks=4, dim=1) i_gate = torch.sigmoid(i_gate) f_gate = torch.sigmoid(f_gate) c_gate = torch.tanh(c_gate) o_gate = torch.sigmoid(o_gate) cy_i = (f_gate * cx_i) + (i_gate * c_gate) hy_i = o_gate * torch.sigmoid(cy_i) cy.append(cy_i) hy.append(hy_i) x = self.dropout(hy_i) return hy, cy class DIAAttention(nn.Module): """ DIA-Net attention module. Parameters: ---------- in_x_features : int Number of x input channels. in_h_features : int Number of h input channels. num_layers : int, default 1 Number of amplifiers. """ def __init__(self, in_x_features, in_h_features, num_layers=1): super(DIAAttention, self).__init__() self.num_layers = num_layers self.pool = nn.AdaptiveAvgPool2d(output_size=1) self.lstm = DIALSTMCell( in_x_features=in_x_features, in_h_features=in_h_features, num_layers=num_layers) def forward(self, x, hc=None): w = self.pool(x) w = w.view(w.size(0), -1) if hc is None: h = [torch.zeros_like(w)] * self.num_layers c = [torch.zeros_like(w)] * self.num_layers else: h, c = hc h, c = self.lstm(w, h, c) w = h[-1].unsqueeze(dim=-1).unsqueeze(dim=-1) x = x * w return x, (h, c) class DIAResUnit(nn.Module): """ DIA-ResNet unit with residual connection. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. attention : nn.Module, default None Attention module. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bottleneck=True, conv1_stride=False, attention=None): super(DIAResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) self.attention = attention def forward(self, x, hc=None): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x, hc = self.attention(x, hc) x = x + identity x = self.activ(x) return x, hc class DIAResNet(nn.Module): """ DIA-ResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(DIAResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), DIAResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, attention=attention)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_diaresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DIA-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported DIA-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = DIAResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def diaresnet10(**kwargs): """ DIA-ResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=10, model_name="diaresnet10", **kwargs) def diaresnet12(**kwargs): """ DIA-ResNet-12 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=12, model_name="diaresnet12", **kwargs) def diaresnet14(**kwargs): """ DIA-ResNet-14 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=14, model_name="diaresnet14", **kwargs) def diaresnetbc14b(**kwargs): """ DIA-ResNet-BC-14b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diaresnetbc14b", **kwargs) def diaresnet16(**kwargs): """ DIA-ResNet-16 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=16, model_name="diaresnet16", **kwargs) def diaresnet18(**kwargs): """ DIA-ResNet-18 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=18, model_name="diaresnet18", **kwargs) def diaresnet26(**kwargs): """ DIA-ResNet-26 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=26, bottleneck=False, model_name="diaresnet26", **kwargs) def diaresnetbc26b(**kwargs): """ DIA-ResNet-BC-26b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diaresnetbc26b", **kwargs) def diaresnet34(**kwargs): """ DIA-ResNet-34 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=34, model_name="diaresnet34", **kwargs) def diaresnetbc38b(**kwargs): """ DIA-ResNet-BC-38b model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diaresnetbc38b", **kwargs) def diaresnet50(**kwargs): """ DIA-ResNet-50 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=50, model_name="diaresnet50", **kwargs) def diaresnet50b(**kwargs): """ DIA-ResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=50, conv1_stride=False, model_name="diaresnet50b", **kwargs) def diaresnet101(**kwargs): """ DIA-ResNet-101 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=101, model_name="diaresnet101", **kwargs) def diaresnet101b(**kwargs): """ DIA-ResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=101, conv1_stride=False, model_name="diaresnet101b", **kwargs) def diaresnet152(**kwargs): """ DIA-ResNet-152 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=152, model_name="diaresnet152", **kwargs) def diaresnet152b(**kwargs): """ DIA-ResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=152, conv1_stride=False, model_name="diaresnet152b", **kwargs) def diaresnet200(**kwargs): """ DIA-ResNet-200 model 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet(blocks=200, model_name="diaresnet200", **kwargs) def diaresnet200b(**kwargs): """ DIA-ResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet(blocks=200, conv1_stride=False, model_name="diaresnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ diaresnet10, diaresnet12, diaresnet14, diaresnetbc14b, diaresnet16, diaresnet18, diaresnet26, diaresnetbc26b, diaresnet34, diaresnetbc38b, diaresnet50, diaresnet50b, diaresnet101, diaresnet101b, diaresnet152, diaresnet152b, diaresnet200, diaresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != diaresnet10 or weight_count == 6297352) assert (model != diaresnet12 or weight_count == 6371336) assert (model != diaresnet14 or weight_count == 6666760) assert (model != diaresnetbc14b or weight_count == 24023976) assert (model != diaresnet16 or weight_count == 7847432) assert (model != diaresnet18 or weight_count == 12568072) assert (model != diaresnet26 or weight_count == 18838792) assert (model != diaresnetbc26b or weight_count == 29954216) assert (model != diaresnet34 or weight_count == 22676232) assert (model != diaresnetbc38b or weight_count == 35884456) assert (model != diaresnet50 or weight_count == 39516072) assert (model != diaresnet50b or weight_count == 39516072) assert (model != diaresnet101 or weight_count == 58508200) assert (model != diaresnet101b or weight_count == 58508200) assert (model != diaresnet152 or weight_count == 74151848) assert (model != diaresnet152b or weight_count == 74151848) assert (model != diaresnet200 or weight_count == 78632872) assert (model != diaresnet200b or weight_count == 78632872) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
file_length: 24,132 | avg_line_length: 32.058904 | max_line_length: 116 | extension_type: py
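A small sketch of the state threading that distinguishes DIA attention from a per-unit SE block: one DIAAttention module is shared by every unit in a stage, and the LSTM (h, c) state is carried from call to call. Feature sizes here are hypothetical; the import assumes the pytorchcv package layout.

import torch
from pytorchcv.models.diaresnet import DIAAttention

attn = DIAAttention(in_x_features=64, in_h_features=64)
x = torch.randn(2, 64, 56, 56)
x1, hc = attn(x)        # first unit in the stage: state starts at zeros
x2, hc = attn(x1, hc)   # later unit: the same module reuses the carried state
print(x2.shape)  # torch.Size([2, 64, 56, 56])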
repo: imgclsmob
file: imgclsmob-master/pytorch/pytorchcv/models/lffd.py
""" LFFD for face detection, implemented in PyTorch. Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. """ __all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface'] import os import torch.nn as nn from .common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent from .resnet import ResUnit from .preresnet import PreResUnit class LffdDetectionBranch(nn.Module): """ LFFD specific detection branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, bias, use_bn): super(LffdDetectionBranch, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=in_channels, bias=bias, use_bn=use_bn) self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class LffdDetectionBlock(nn.Module): """ LFFD specific detection block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. """ def __init__(self, in_channels, mid_channels, bias, use_bn): super(LffdDetectionBlock, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, use_bn=use_bn) self.branches = Concurrent() self.branches.add_module("bbox_branch", LffdDetectionBranch( in_channels=mid_channels, out_channels=4, bias=bias, use_bn=use_bn)) self.branches.add_module("score_branch", LffdDetectionBranch( in_channels=mid_channels, out_channels=2, bias=bias, use_bn=use_bn)) def forward(self, x): x = self.conv(x) x = self.branches(x) return x class LFFD(nn.Module): """ LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- enc_channels : list of int Number of output channels for each encoder stage. dec_channels : int Number of output channels for each decoder stage. init_block_channels : int Number of output channels for the initial encoder unit. layers : list of int Number of units in each encoder stage. int_bends : list of int Number of internal bends for each encoder stage. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (640, 640) Spatial size of the expected input image. 
""" def __init__(self, enc_channels, dec_channels, init_block_channels, layers, int_bends, use_preresnet, in_channels=3, in_size=(640, 640)): super(LFFD, self).__init__() self.in_size = in_size unit_class = PreResUnit if use_preresnet else ResUnit bias = True use_bn = False self.encoder = MultiOutputSequential(return_last=False) self.encoder.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, padding=0, bias=bias, use_bn=use_bn)) in_channels = init_block_channels for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] stage = MultiOutputSequential(multi_output=False, dual_output=True) stage.add_module("trans{}".format(i + 1), conv3x3( in_channels=in_channels, out_channels=channels_per_stage, stride=2, padding=0, bias=bias)) for j in range(layers_per_stage): unit = unit_class( in_channels=channels_per_stage, out_channels=channels_per_stage, stride=1, bias=bias, use_bn=use_bn, bottleneck=False) if layers_per_stage - j <= int_bends_per_stage: unit.do_output = True stage.add_module("unit{}".format(j + 1), unit) final_activ = nn.ReLU(inplace=True) final_activ.do_output = True stage.add_module("final_activ", final_activ) stage.do_output2 = True in_channels = channels_per_stage self.encoder.add_module("stage{}".format(i + 1), stage) self.decoder = ParallelConcurent() k = 0 for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] for j in range(layers_per_stage): if layers_per_stage - j <= int_bends_per_stage: self.decoder.add_module("unit{}".format(k + 1), LffdDetectionBlock( in_channels=channels_per_stage, mid_channels=dec_channels, bias=bias, use_bn=use_bn)) k += 1 self.decoder.add_module("unit{}".format(k + 1), LffdDetectionBlock( in_channels=channels_per_stage, mid_channels=dec_channels, bias=bias, use_bn=use_bn)) k += 1 self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x def get_lffd(blocks, use_preresnet, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create LFFD model with specific parameters. Parameters: ---------- blocks : int Number of blocks. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 20: layers = [3, 1, 1, 1, 1] enc_channels = [64, 64, 64, 128, 128] int_bends = [0, 0, 0, 0, 0] elif blocks == 25: layers = [4, 2, 1, 3] enc_channels = [64, 64, 128, 128] int_bends = [1, 1, 0, 2] else: raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks)) dec_channels = 128 init_block_channels = 64 net = LFFD( enc_channels=enc_channels, dec_channels=dec_channels, init_block_channels=init_block_channels, layers=layers, int_bends=int_bends, use_preresnet=use_preresnet, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def lffd20x5s320v2_widerface(**kwargs): """ LFFD-320-20L-5S-V2 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs) def lffd25x8s560v1_widerface(**kwargs): """ LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (640, 640) pretrained = False models = [ (lffd20x5s320v2_widerface, 5), (lffd25x8s560v1_widerface, 8), ] for model, num_outs in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != lffd20x5s320v2_widerface or weight_count == 1520606) assert (model != lffd25x8s560v1_widerface or weight_count == 2290608) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert (len(y) == num_outs) if __name__ == "__main__": _test()
file_length: 10,582 | avg_line_length: 30.685629 | max_line_length: 115 | extension_type: py
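A brief sketch of the multi-output behavior above: unlike the classifiers in this section, the LFFD decoder returns one tensor per detection scale rather than a single fused map. The import assumes the pytorchcv package layout; the batch size is arbitrary.

import torch
from pytorchcv.models.lffd import lffd20x5s320v2_widerface

net = lffd20x5s320v2_widerface(pretrained=False)
net.eval()
x = torch.randn(1, 3, 640, 640)
with torch.no_grad():
    y = net(x)
print(len(y))  # 5 detection scales for this variant
for yi in y:
    # 6 channels per scale: 4 bbox regression + 2 face/background scores
    print(tuple(yi.shape))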
repo: imgclsmob
file: imgclsmob-master/pytorch/pytorchcv/models/sepreresnet.py
""" SE-PreResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18', 'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b', 'SEPreResUnit'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, SEBlock from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation class SEPreResUnit(nn.Module): """ SE-PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride): super(SEPreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def forward(self, x): identity = x x, x_pre_activ = self.body(x) x = self.se(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class SEPreResNet(nn.Module): """ SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(SEPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 stage.add_module("unit{}".format(j + 1), SEPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sepreresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SE-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sepreresnet10(**kwargs): """ SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs) def sepreresnet12(**kwargs): """ SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs) def sepreresnet14(**kwargs): """ SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs) def sepreresnet16(**kwargs): """ SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs) def sepreresnet18(**kwargs): """ SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs) def sepreresnet26(**kwargs): """ SE-PreResNet-26 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs) def sepreresnetbc26b(**kwargs): """ SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs) def sepreresnet34(**kwargs): """ SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs) def sepreresnetbc38b(**kwargs): """ SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs) def sepreresnet50(**kwargs): """ SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs) def sepreresnet50b(**kwargs): """ SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs) def sepreresnet101(**kwargs): """ SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs) def sepreresnet101b(**kwargs): """ SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs) def sepreresnet152(**kwargs): """ SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs) def sepreresnet152b(**kwargs): """ SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs) def sepreresnet200(**kwargs): """ SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs) def sepreresnet200b(**kwargs): """ SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ sepreresnet10, sepreresnet12, sepreresnet14, sepreresnet16, sepreresnet18, sepreresnet26, sepreresnetbc26b, sepreresnet34, sepreresnetbc38b, sepreresnet50, sepreresnet50b, sepreresnet101, sepreresnet101b, sepreresnet152, sepreresnet152b, sepreresnet200, sepreresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet10 or weight_count == 5461668) assert (model != sepreresnet12 or weight_count == 5536232) assert (model != sepreresnet14 or weight_count == 5833840) assert (model != sepreresnet16 or weight_count == 7022976) assert (model != sepreresnet18 or weight_count == 11776928) assert (model != sepreresnet26 or weight_count == 18092188) assert (model != sepreresnetbc26b or weight_count == 17388424) assert (model != sepreresnet34 or weight_count == 21957204) assert (model != sepreresnetbc38b or weight_count == 24019064) assert (model != sepreresnet50 or weight_count == 28080472) assert (model != sepreresnet50b or weight_count == 28080472) assert (model != sepreresnet101 or weight_count == 49319320) assert (model != sepreresnet101b or weight_count == 49319320) assert (model != sepreresnet152 or weight_count == 66814296) assert (model != sepreresnet152b or weight_count == 66814296) assert (model != sepreresnet200 or weight_count == 71828312) assert (model != sepreresnet200b or weight_count == 71828312) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
18,420
32.371377
119
py
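Editor's aside (hedged, not part of sepreresnet.py): the depth bookkeeping that `get_sepreresnet` asserts above, spelled out. A simple unit holds 2 convolutions and a bottleneck unit holds 3; the `+ 2` counts the initial block and the classifier. `sepreresnet_depth` is a hypothetical helper name used only for this check.

def sepreresnet_depth(layers, bottleneck):
    # Each unit contributes 2 convs (simple) or 3 convs (bottleneck);
    # the initial block and the final classifier add two more layers.
    convs_per_unit = 3 if bottleneck else 2
    return sum(layers) * convs_per_unit + 2


assert (sepreresnet_depth([2, 2, 2, 2], bottleneck=True) == 26)    # sepreresnetbc26b
assert (sepreresnet_depth([3, 3, 3, 3], bottleneck=False) == 26)   # sepreresnet26
assert (sepreresnet_depth([3, 4, 6, 3], bottleneck=True) == 50)    # sepreresnet50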
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnext.py
""" ResNeXt for ImageNet-1K, implemented in PyTorch. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d', 'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'ResNeXtBottleneck', 'ResNeXtUnit'] import os import math import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResInitBlock class ResNeXtBottleneck(nn.Module): """ ResNeXt bottleneck block for residual path in ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, bottleneck_factor=4): super(ResNeXtBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width) self.conv2 = conv3x3_block( in_channels=group_width, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResNeXtUnit(nn.Module): """ ResNeXt unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(ResNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = ResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNeXt(nn.Module): """ ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNeXt, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 14: layers = [1, 1, 1, 1] elif blocks == 26: layers = [2, 2, 2, 2] elif blocks == 38: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 2 == blocks) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnext14_16x4d(**kwargs): """ ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs) def resnext14_32x2d(**kwargs): """ ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs) def resnext14_32x4d(**kwargs): """ ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs) def resnext26_16x4d(**kwargs): """ ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs) def resnext26_32x2d(**kwargs): """ ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs) def resnext26_32x4d(**kwargs): """ ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs) def resnext38_32x4d(**kwargs): """ ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs) def resnext50_32x4d(**kwargs): """ ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs) def resnext101_32x4d(**kwargs): """ ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs) def resnext101_64x4d(**kwargs): """ ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resnext14_16x4d, resnext14_32x2d, resnext14_32x4d, resnext26_16x4d, resnext26_32x2d, resnext26_32x4d, resnext38_32x4d, resnext50_32x4d, resnext101_32x4d, resnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext14_16x4d or weight_count == 7127336) assert (model != resnext14_32x2d or weight_count == 7029416) assert (model != resnext14_32x4d or weight_count == 9411880) assert (model != resnext26_16x4d or weight_count == 10119976) assert (model != resnext26_32x2d or weight_count == 9924136) assert (model != resnext26_32x4d or weight_count == 15389480) assert (model != resnext38_32x4d or weight_count == 21367080) assert (model != resnext50_32x4d or weight_count == 25028904) assert (model != resnext101_32x4d or weight_count == 44177704) assert (model != resnext101_64x4d or weight_count == 83455272) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
14,857
31.090713
119
py
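Editor's aside (hedged, not part of resnext.py): a worked instance of the width arithmetic in `ResNeXtBottleneck` above, for the first stage of resnext50_32x4d (cardinality=32, bottleneck_width=4, out_channels=256). Plain arithmetic, no library calls.

import math

out_channels = 256
bottleneck_factor = 4
cardinality = 32
bottleneck_width = 4

mid_channels = out_channels // bottleneck_factor               # 256 / 4 = 64
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))  # 64 * 4 / 64 = 4
group_width = cardinality * D                                  # 32 groups of width 4 = 128

assert ((mid_channels, D, group_width) == (64, 4, 128))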
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/jasper.py
""" Jasper/DR for ASR, implemented in PyTorch. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader', 'NemoMelSpecExtractor', 'CtcDecoder'] import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from .common import DualPathSequential, DualPathParallelConcurent def outmask_fill(x, x_len, value=0.0): """ Masked fill a tensor. Parameters: ---------- x : tensor Input tensor. x_len : tensor Tensor with lengths. value : float, default 0.0 Filled value. Returns: ------- tensor Resulted tensor. """ max_len = x.size(2) mask = torch.arange(max_len).to(x_len.device).expand(len(x_len), max_len) >= x_len.unsqueeze(1) mask = mask.unsqueeze(dim=1).to(device=x.device) x = x.masked_fill(mask=mask, value=value) return x def masked_normalize(x, x_len): """ Normalize a tensor with mask. Parameters: ---------- x : tensor Input tensor. x_len : tensor Tensor with lengths. Returns: ------- tensor Resulted tensor. """ x = outmask_fill(x, x_len) x_mean = x.sum(dim=2) / x_len.unsqueeze(dim=1) x_m0 = x - x_mean.unsqueeze(dim=2) x_m0 = outmask_fill(x_m0, x_len) x_std = x_m0.sum(dim=2) / x_len.unsqueeze(dim=1) x = x_m0 / x_std.unsqueeze(dim=2) return x def masked_normalize2(x, x_len): """ Normalize a tensor with mask (scheme #2). Parameters: ---------- x : tensor Input tensor. x_len : tensor Tensor with lengths. Returns: ------- tensor Resulted tensor. """ x = outmask_fill(x, x_len) x_mean = x.sum(dim=2) / x_len.unsqueeze(dim=1) x2_mean = x.square().sum(dim=2) / x_len.unsqueeze(dim=1) x_std = (x2_mean - x_mean.square()).sqrt() x = (x - x_mean.unsqueeze(dim=2)) / x_std.unsqueeze(dim=2) return x def masked_normalize3(x, x_len): """ Normalize a tensor with mask (scheme #3). Parameters: ---------- x : tensor Input tensor. x_len : tensor Tensor with lengths. Returns: ------- tensor Resulted tensor. """ x_eps = 1e-5 x_mean = torch.zeros(x.shape[:2], dtype=x.dtype, device=x.device) x_std = torch.zeros(x.shape[:2], dtype=x.dtype, device=x.device) for i in range(x.shape[0]): x_mean[i, :] = x[i, :, : x_len[i]].mean(dim=1) x_std[i, :] = x[i, :, : x_len[i]].std(dim=1) x_std += x_eps return (x - x_mean.unsqueeze(dim=2)) / x_std.unsqueeze(dim=2) class NemoAudioReader(object): """ Audio Reader from NVIDIA NEMO toolkit. Parameters: ---------- desired_audio_sample_rate : int, default 16000 Desired audio sample rate. trunc_value : int or None, default None Value to truncate. """ def __init__(self, desired_audio_sample_rate=16000): super(NemoAudioReader, self).__init__() self.desired_audio_sample_rate = desired_audio_sample_rate def read_from_file(self, audio_file_path): """ Read audio from file. Parameters: ---------- audio_file_path : str Path to audio file. Returns: ------- np.array Audio data. """ from soundfile import SoundFile with SoundFile(audio_file_path, "r") as data: sample_rate = data.samplerate audio_data = data.read(dtype="float32") audio_data = audio_data.transpose() if sample_rate != self.desired_audio_sample_rate: from librosa.core import resample as lr_resample audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate) if audio_data.ndim >= 2: audio_data = np.mean(audio_data, axis=1) return audio_data def read_from_files(self, audio_file_paths): """ Read audios from files. Parameters: ---------- audio_file_paths : list of str Paths to audio files. 
Returns: ------- list of np.array Audio data. """ assert (type(audio_file_paths) in (list, tuple)) audio_data_list = [] for audio_file_path in audio_file_paths: audio_data = self.read_from_file(audio_file_path) audio_data_list.append(audio_data) return audio_data_list class NemoMelSpecExtractor(nn.Module): """ Mel-Spectrogram Extractor from NVIDIA NEMO toolkit. Parameters: ---------- sample_rate : int, default 16000 Sample rate of the input audio data. window_size_sec : float, default 0.02 Size of window for FFT in seconds. window_stride_sec : float, default 0.01 Stride of window for FFT in seconds. n_fft : int, default 512 Length of FT window. n_filters : int, default 64 Number of Mel spectrogram freq bins. preemph : float, default 0.97 Amount of pre emphasis to add to audio. dither : float, default 1.0e-05 Amount of white-noise dithering. """ def __init__(self, sample_rate=16000, window_size_sec=0.02, window_stride_sec=0.01, n_fft=512, n_filters=64, preemph=0.97, dither=1.0e-5): super(NemoMelSpecExtractor, self).__init__() self.log_zero_guard_value = 2 ** -24 win_length = int(window_size_sec * sample_rate) self.hop_length = int(window_stride_sec * sample_rate) self.n_filters = n_filters window_tensor = torch.hann_window(win_length, periodic=False) self.register_buffer("window", window_tensor) self.stft = lambda x: torch.stft( x, n_fft=n_fft, hop_length=self.hop_length, win_length=win_length, window=self.window.to(dtype=torch.float), center=True) self.dither = dither self.preemph = preemph self.pad_align = 16 from librosa.filters import mel as librosa_mel filter_bank = librosa_mel( sr=sample_rate, n_fft=n_fft, n_mels=n_filters, fmin=0.0, fmax=(sample_rate / 2.0)) fb_tensor = torch.from_numpy(filter_bank).unsqueeze(0) self.register_buffer("fb", fb_tensor) def forward(self, x, x_len): """ Preprocess audio. Parameters: ---------- xs : list of np.array Audio data. Returns: ------- x : np.array Audio data. x_len : np.array Audio data lengths. """ x_len = torch.ceil(x_len.float() / self.hop_length).long() if self.dither > 0: x += self.dither * torch.randn_like(x) x = torch.cat((x[:, :1], x[:, 1:] - self.preemph * x[:, :-1]), dim=1) with torch.cuda.amp.autocast(enabled=False): x = self.stft(x) x = x.pow(2).sum(-1) x = torch.matmul(self.fb.to(x.dtype), x) x = torch.log(x + self.log_zero_guard_value) x = masked_normalize2(x, x_len) x = outmask_fill(x, x_len) x_len_max = x.size(-1) pad_rem = x_len_max % self.pad_align if pad_rem != 0: x = F.pad(x, pad=(0, self.pad_align - pad_rem)) return x, x_len def calc_flops(self, x): assert (x.shape[0] == 1) num_flops = x.numel() num_macs = 0 return num_flops, num_macs class CtcDecoder(object): """ CTC decoder (to decode a sequence of labels to words). Parameters: ---------- vocabulary : list of str Vocabulary of the dataset. """ def __init__(self, vocabulary): super().__init__() self.blank_id = len(vocabulary) self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))]) def __call__(self, predictions): """ Decode a sequence of labels to words. Parameters: ---------- predictions : np.array of int or list of list of int Tensor with predicted labels. Returns: ------- list of str Words. 
""" hypotheses = [] for prediction in predictions: decoded_prediction = [] previous = self.blank_id for p in prediction: if (p != previous or previous == self.blank_id) and p != self.blank_id: decoded_prediction.append(p) previous = p hypothesis = "".join([self.labels_map[c] for c in decoded_prediction]) hypotheses.append(hypothesis) return hypotheses def conv1d1(in_channels, out_channels, stride=1, groups=1, bias=False): """ 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, groups=groups, bias=bias) class MaskConv1d(nn.Conv1d): """ Masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 1 int Convolution window size. stride : int or tuple/list of 1 int Strides of the convolution. padding : int or tuple/list of 1 int, default 0 Padding value for convolution layer. dilation : int or tuple/list of 1 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_mask : bool, default True Whether to use mask. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding=0, dilation=1, groups=1, bias=False, use_mask=True): super(MaskConv1d, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.use_mask = use_mask def forward(self, x, x_len): if self.use_mask: x = outmask_fill(x, x_len) x_len = (x_len + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) // self.stride[0] + 1 x = F.conv1d( input=x, weight=self.weight, bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) return x, x_len def mask_conv1d1(in_channels, out_channels, stride=1, groups=1, bias=False): """ Masked 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. """ return MaskConv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, groups=groups, bias=bias) class MaskConvBlock1d(nn.Module): """ Masked 1D convolution block with batch normalization, activation, and dropout. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. stride : int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. 
dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), dropout_rate=0.0): super(MaskConvBlock1d, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) self.conv = MaskConv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_bn: self.bn = nn.BatchNorm1d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = activation() if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x, x_len): x, x_len = self.conv(x, x_len) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x) return x, x_len def mask_conv1d1_block(in_channels, out_channels, stride=1, padding=0, **kwargs): """ 1-dim kernel version of the masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int, default 1 Strides of the convolution. padding : int, default 0 Padding value for convolution layer. """ return MaskConvBlock1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding, **kwargs) class ChannelShuffle1d(nn.Module): """ 1D version of the channel shuffle layer. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. """ def __init__(self, channels, groups): super(ChannelShuffle1d, self).__init__() assert (channels % groups == 0) self.groups = groups def forward(self, x): batch, channels, seq_len = x.size() channels_per_group = channels // self.groups x = x.view(batch, self.groups, channels_per_group, seq_len) x = torch.transpose(x, 1, 2).contiguous() x = x.view(batch, channels, seq_len) return x def __repr__(self): s = "{name}(groups={groups})" return s.format( name=self.__class__.__name__, groups=self.groups) class DwsConvBlock1d(nn.Module): """ Depthwise version of the 1D standard convolution block with batch normalization, activation, dropout, and channel shuffle. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. stride : int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), dropout_rate=0.0): super(DwsConvBlock1d, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) self.use_channel_shuffle = (groups > 1) self.dw_conv = MaskConv1d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias) self.pw_conv = mask_conv1d1( in_channels=in_channels, out_channels=out_channels, groups=groups, bias=bias) if self.use_channel_shuffle: self.shuffle = ChannelShuffle1d( channels=out_channels, groups=groups) if self.use_bn: self.bn = nn.BatchNorm1d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = activation() if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x, x_len): x, x_len = self.dw_conv(x, x_len) x, x_len = self.pw_conv(x, x_len) if self.use_channel_shuffle: x = self.shuffle(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x) return x, x_len class JasperUnit(nn.Module): """ Jasper unit with residual connection. Parameters: ---------- in_channels : int or list of int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. bn_eps : float Small float added to variance in Batch norm. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. """ def __init__(self, in_channels, out_channels, kernel_size, bn_eps, dropout_rate, repeat, use_dw, use_dr): super(JasperUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.use_dr = use_dr block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d if self.use_dr: self.identity_block = DualPathParallelConcurent() for i, dense_in_channels_i in enumerate(in_channels): self.identity_block.add_module("block{}".format(i + 1), mask_conv1d1_block( in_channels=dense_in_channels_i, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None)) in_channels = in_channels[-1] else: self.identity_block = mask_conv1d1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None) self.body = DualPathSequential() for i in range(repeat): activation = (lambda: nn.ReLU(inplace=True)) if i < repeat - 1 else None dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0 self.body.add_module("block{}".format(i + 1), block_class( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size // 2), bn_eps=bn_eps, dropout_rate=dropout_rate_i, activation=activation)) in_channels = out_channels self.activ = nn.ReLU(inplace=True) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x, x_len): if self.use_dr: x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None) y = [x] if y is None else y + [x] y_len = [x_len] if y_len is None else y_len + [x_len] identity, _ = self.identity_block(y, y_len) identity = torch.stack(tuple(identity), dim=1) identity = identity.sum(dim=1) else: identity, _ = self.identity_block(x, x_len) x, x_len = self.body(x, x_len) x = x + identity x = self.activ(x) if self.use_dropout: x = 
self.dropout(x) if self.use_dr: return x, (x_len, y, y_len) else: return x, x_len class JasperFinalBlock(nn.Module): """ Jasper specific final block. Parameters: ---------- in_channels : int Number of input channels. channels : list of int Number of output channels for each block. kernel_sizes : list of int Kernel sizes for each block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each block. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. """ def __init__(self, in_channels, channels, kernel_sizes, bn_eps, dropout_rates, use_dw, use_dr): super(JasperFinalBlock, self).__init__() self.use_dr = use_dr conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d self.conv1 = conv1_class( in_channels=in_channels, out_channels=channels[-2], kernel_size=kernel_sizes[-2], stride=1, padding=(2 * kernel_sizes[-2] // 2 - 1), dilation=2, bn_eps=bn_eps, dropout_rate=dropout_rates[-2]) self.conv2 = MaskConvBlock1d( in_channels=channels[-2], out_channels=channels[-1], kernel_size=kernel_sizes[-1], stride=1, padding=(kernel_sizes[-1] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[-1]) def forward(self, x, x_len): if self.use_dr: x_len = x_len[0] x, x_len = self.conv1(x, x_len) x, x_len = self.conv2(x, x_len) return x, x_len class Jasper(nn.Module): """ Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- channels : list of int Number of output channels for each unit and initial/final block. kernel_sizes : list of int Kernel sizes for each unit and initial/final block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each unit and initial/final block. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. from_audio : bool, default True Whether to treat input as audio instead of Mel-specs. dither : float, default 0.0 Amount of white-noise dithering. return_text : bool, default False Whether to return text instead of logits. vocabulary : list of str or None, default None Vocabulary of the dataset. in_channels : int, default 64 Number of input channels (audio features). num_classes : int, default 29 Number of classification classes (number of graphemes). 
""" def __init__(self, channels, kernel_sizes, bn_eps, dropout_rates, repeat, use_dw, use_dr, from_audio=True, dither=0.0, return_text=False, vocabulary=None, in_channels=64, num_classes=29): super(Jasper, self).__init__() self.in_size = in_channels self.num_classes = num_classes self.vocabulary = vocabulary self.from_audio = from_audio self.return_text = return_text if self.from_audio: self.preprocessor = NemoMelSpecExtractor(dither=dither) self.features = DualPathSequential() init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d self.features.add_module("init_block", init_block_class( in_channels=in_channels, out_channels=channels[0], kernel_size=kernel_sizes[0], stride=2, padding=(kernel_sizes[0] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[0])) in_channels = channels[0] in_channels_list = [] for i, (out_channels, kernel_size, dropout_rate) in\ enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])): in_channels_list += [in_channels] self.features.add_module("unit{}".format(i + 1), JasperUnit( in_channels=(in_channels_list if use_dr else in_channels), out_channels=out_channels, kernel_size=kernel_size, bn_eps=bn_eps, dropout_rate=dropout_rate, repeat=repeat, use_dw=use_dw, use_dr=use_dr)) in_channels = out_channels self.features.add_module("final_block", JasperFinalBlock( in_channels=in_channels, channels=channels, kernel_sizes=kernel_sizes, bn_eps=bn_eps, dropout_rates=dropout_rates, use_dw=use_dw, use_dr=use_dr)) in_channels = channels[-1] self.output = conv1d1( in_channels=in_channels, out_channels=num_classes, bias=True) if self.return_text: self.ctc_decoder = CtcDecoder(vocabulary=vocabulary) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x, x_len=None): if x_len is None: assert (type(x) in (list, tuple)) x, x_len = x if self.from_audio: x, x_len = self.preprocessor(x, x_len) x, x_len = self.features(x, x_len) x = self.output(x) if self.return_text: greedy_predictions = x.transpose(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).cpu().numpy() return self.ctc_decoder(greedy_predictions) else: return x, x_len def get_jasper(version, use_dw=False, use_dr=False, bn_eps=1e-3, vocabulary=None, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Jasper/DR/QuartzNet model with specific parameters. Parameters: ---------- version : tuple of str Model type and configuration. use_dw : bool, default False Whether to use depthwise block. use_dr : bool, default False Whether to use dense residual scheme. bn_eps : float, default 1e-3 Small float added to variance in Batch norm. vocabulary : list of str or None, default None Vocabulary of the dataset. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" import numpy as np blocks, repeat = tuple(map(int, version[1].split("x"))) main_stage_repeat = blocks // 5 model_type = version[0] if model_type == "jasper": channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024] kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1] dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4] elif model_type == "quartznet": channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024] kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1] dropout_rates_per_stage = [0.0] * 8 else: raise ValueError("Unsupported Jasper family model type: {}".format(model_type)) stage_repeat = np.full((8,), 1) stage_repeat[1:-2] *= main_stage_repeat channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], []) kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], []) dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], []) net = Jasper( channels=channels, kernel_sizes=kernel_sizes, bn_eps=bn_eps, dropout_rates=dropout_rates, repeat=repeat, use_dw=use_dw, use_dr=use_dr, vocabulary=vocabulary, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def jasper5x3(**kwargs): """ Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "5x3"), model_name="jasper5x3", **kwargs) def jasper10x4(**kwargs): """ Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs) def jasper10x5(**kwargs): """ Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_jasper(version=("jasper", "10x5"), model_name="jasper10x5", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False from_audio = True audio_features = 64 num_classes = 29 use_cuda = True models = [ jasper5x3, jasper10x4, jasper10x5, ] for model in models: net = model( in_channels=audio_features, num_classes=num_classes, from_audio=from_audio, pretrained=pretrained) if use_cuda: net = net.cuda() # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != jasper5x3 or weight_count == 107681053) assert (model != jasper10x4 or weight_count == 261393693) assert (model != jasper10x5 or weight_count == 322286877) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max) x = torch.randn(x_shape) x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device) if use_cuda: x = x.cuda() x_len = x_len.cuda() y, y_len = net(x, x_len) # y.sum().backward() assert (tuple(y.size())[:2] == (batch, net.num_classes)) if from_audio: assert (y.size()[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.size()[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
35,202
29.347414
117
py
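Editor's aside (hedged, not part of jasper.py): a toy trace of the greedy collapse rule implemented by `CtcDecoder` above — repeated labels merge and the blank id (== len(vocabulary)) is dropped. The three-letter vocabulary below is made up purely for illustration.

vocabulary = ["a", "b", "c"]
blank_id = len(vocabulary)                  # 3
prediction = [0, 0, 3, 0, 1, 1, 3, 3, 2]    # i.e. "aa-abb--c" with '-' as blank

decoded = []
previous = blank_id
for p in prediction:
    # Emit a label only when it differs from the previous step (or follows a
    # blank) and is not itself the blank symbol:
    if (p != previous or previous == blank_id) and p != blank_id:
        decoded.append(p)
    previous = p

assert ("".join(vocabulary[i] for i in decoded) == "aabc")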
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resneta.py
""" ResNet(A) with average downsampling for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b'] import os import torch.nn as nn from .common import conv1x1_block from .resnet import ResBlock, ResBottleneck from .senet import SEInitBlock class ResADownBlock(nn.Module): """ ResNet(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. """ def __init__(self, in_channels, out_channels, stride, dilation=1): super(ResADownBlock, self).__init__() self.pool = nn.AvgPool2d( kernel_size=(stride if dilation == 1 else 1), stride=(stride if dilation == 1 else 1), ceil_mode=True, count_include_pad=False) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ResAUnit(nn.Module): """ ResNet(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bottleneck=True, conv1_stride=False): super(ResAUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_block = ResADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNetA(nn.Module): """ ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. dilated : bool, default False Whether to use dilation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, dilated=False, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNetA, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if dilated: stride = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1 dilation = (2 ** max(0, i - 1 - int(j == 0))) else: stride = 2 if (j == 0) and (i != 0) else 1 dilation = 1 stage.add_module("unit{}".format(j + 1), ResAUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=dilation, dilation=dilation, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resneta(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet(A) with average downsampling model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNetA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resneta10(**kwargs): """ ResNet(A)-10 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=10, model_name="resneta10", **kwargs) def resnetabc14b(**kwargs): """ ResNet(A)-BC-14b with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs) def resneta18(**kwargs): """ ResNet(A)-18 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=18, model_name="resneta18", **kwargs) def resneta50b(**kwargs): """ ResNet(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs) def resneta101b(**kwargs): """ ResNet(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs) def resneta152b(**kwargs): """ ResNet(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resneta10, resnetabc14b, resneta18, resneta50b, resneta101b, resneta152b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resneta10 or weight_count == 5438024) assert (model != resnetabc14b or weight_count == 10084168) assert (model != resneta18 or weight_count == 11708744) assert (model != resneta50b or weight_count == 25576264) assert (model != resneta101b or weight_count == 44568392) assert (model != resneta152b or weight_count == 60212040) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
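# --- Editor's illustration (not part of the original file) ---------------
# A minimal, self-contained sketch of why the ResNet(A) identity branch
# above uses AvgPool2d followed by a stride-1 1x1 conv instead of the
# plain ResNet strided 1x1 conv: the strided conv reads only every
# stride-th position (discarding ~75% of activations for stride 2), while
# pooling first lets every input position contribute before projection.
# The class names `StridedDown`/`PooledDown` are hypothetical, chosen for
# this example; only `torch` is assumed.
import torch
import torch.nn as nn


class StridedDown(nn.Module):
    """Plain ResNet shortcut: strided 1x1 conv, subsamples the input."""
    def __init__(self, in_channels, out_channels, stride=2):
        super(StridedDown, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)

    def forward(self, x):
        return self.conv(x)


class PooledDown(nn.Module):
    """ResNet(A)-style shortcut: pool over all positions, then project."""
    def __init__(self, in_channels, out_channels, stride=2):
        super(PooledDown, self).__init__()
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)

    def forward(self, x):
        return self.conv(self.pool(x))


_x = torch.randn(1, 64, 56, 56)
assert tuple(StridedDown(64, 128)(_x).shape) == (1, 128, 28, 28)
assert tuple(PooledDown(64, 128)(_x).shape) == (1, 128, 28, 28)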
14,395
32.094253
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnesta.py
""" ResNeSt(A) with average downsampling for ImageNet-1K, implemented in PyTorch. Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. """ __all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnesta50', 'resnesta101', 'resnesta152', 'resnesta200', 'resnesta269', 'ResNeStADownBlock'] import os import torch.nn as nn from .common import conv1x1_block, conv3x3_block, saconv3x3_block from .senet import SEInitBlock class ResNeStABlock(nn.Module): """ Simple ResNeSt(A) block for residual path in ResNeSt(A) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(ResNeStABlock, self).__init__() self.resize = (stride > 1) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn) if self.resize: self.pool = nn.AvgPool2d( kernel_size=3, stride=stride, padding=1) self.conv2 = saconv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x): x = self.conv1(x) if self.resize: x = self.pool(x) x = self.conv2(x) return x class ResNeStABottleneck(nn.Module): """ ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, bottleneck_factor=4): super(ResNeStABottleneck, self).__init__() self.resize = (stride > 1) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = saconv3x3_block( in_channels=mid_channels, out_channels=mid_channels) if self.resize: self.pool = nn.AvgPool2d( kernel_size=3, stride=stride, padding=1) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) if self.resize: x = self.pool(x) x = self.conv3(x) return x class ResNeStADownBlock(nn.Module): """ ResNeSt(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(ResNeStADownBlock, self).__init__() self.pool = nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ResNeStAUnit(nn.Module): """ ResNeSt(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck=True): super(ResNeStAUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResNeStABottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.body = ResNeStABlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_block = ResNeStADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResNeStA(nn.Module): """ ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNeStA, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResNeStAUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Sequential() if dropout_rate > 0.0: self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnesta(blocks, bottleneck=None, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNeSt(A) with average downsampling model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if blocks >= 101: init_block_channels *= 2 if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNeStA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnestabc14(**kwargs): """ ResNeSt(A)-BC-14 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=14, bottleneck=True, model_name="resnestabc14", **kwargs) def resnesta18(**kwargs): """ ResNeSt(A)-18 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=18, model_name="resnesta18", **kwargs) def resnestabc26(**kwargs): """ ResNeSt(A)-BC-26 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=26, bottleneck=True, model_name="resnestabc26", **kwargs) def resnesta50(**kwargs): """ ResNeSt(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=50, model_name="resnesta50", **kwargs) def resnesta101(**kwargs): """ ResNeSt(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=101, model_name="resnesta101", **kwargs) def resnesta152(**kwargs): """ ResNeSt(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=152, model_name="resnesta152", **kwargs) def resnesta200(in_size=(256, 256), **kwargs): """ ResNeSt(A)-200 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=200, in_size=in_size, dropout_rate=0.2, model_name="resnesta200", **kwargs) def resnesta269(in_size=(320, 320), **kwargs): """ ResNeSt(A)-269 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (320, 320) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnesta(blocks=269, in_size=in_size, dropout_rate=0.2, model_name="resnesta269", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (resnestabc14, 224), (resnesta18, 224), (resnestabc26, 224), (resnesta50, 224), (resnesta101, 224), (resnesta152, 224), (resnesta200, 256), (resnesta269, 320), ] for model, size in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnestabc14 or weight_count == 10611688) assert (model != resnesta18 or weight_count == 12763784) assert (model != resnestabc26 or weight_count == 17069448) assert (model != resnesta50 or weight_count == 27483240) assert (model != resnesta101 or weight_count == 48275016) assert (model != resnesta152 or weight_count == 65316040) assert (model != resnesta200 or weight_count == 70201544) assert (model != resnesta269 or weight_count == 110929480) batch = 14 x = torch.randn(batch, 3, size, size) y = net(x) y.sum().backward() assert (tuple(y.size()) == (batch, 1000)) if __name__ == "__main__": _test()
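# --- Editor's illustration (not part of the original file) ---------------
# The `saconv3x3_block` imported from .common wraps the split-attention
# convolution that gives ResNeSt its name. Below is a deliberately
# simplified, self-contained sketch of the mechanism for cardinality 1:
# the input carries `radix` feature splits, and a gate computed from their
# sum softmax-weights the splits before fusing them. `ToySplitAttention`
# and the reduction choice are illustrative assumptions, not the repo API.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToySplitAttention(nn.Module):
    """Fuse `radix` channel splits of shape (B, radix*C, H, W) into (B, C, H, W)."""
    def __init__(self, channels, radix=2, reduction=4):
        super(ToySplitAttention, self).__init__()
        self.radix = radix
        mid_channels = max(channels // reduction, 8)
        self.fc1 = nn.Conv2d(channels, mid_channels, kernel_size=1)
        self.fc2 = nn.Conv2d(mid_channels, channels * radix, kernel_size=1)

    def forward(self, x):
        batch, radix_channels, height, width = x.shape
        channels = radix_channels // self.radix
        splits = x.view(batch, self.radix, channels, height, width)
        # Squeeze: sum the splits, then global average pool.
        gate = splits.sum(dim=1).mean(dim=(2, 3), keepdim=True)
        # Excite: per-split attention logits, softmax across the radix axis.
        gate = self.fc2(F.relu(self.fc1(gate)))
        gate = torch.softmax(gate.view(batch, self.radix, channels, 1, 1), dim=1)
        return (gate * splits).sum(dim=1)


_x = torch.randn(2, 2 * 64, 14, 14)
assert tuple(ToySplitAttention(64, radix=2)(_x).shape) == (2, 64, 14, 14)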
17,572
30.892922
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/senet.py
""" SENet for ImageNet-1K, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock'] import os import math import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, SEBlock class SENetBottleneck(nn.Module): """ SENet bottleneck block for residual path in SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(SENetBottleneck, self).__init__() mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D group_width2 = group_width // 2 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width2) self.conv2 = conv3x3_block( in_channels=group_width2, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class SENetUnit(nn.Module): """ SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. identity_conv3x3 : bool, default False Whether to use 3x3 convolution in the identity link. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, identity_conv3x3): super(SENetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = SENetBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) self.se = SEBlock(channels=out_channels) if self.resize_identity: if identity_conv3x3: self.identity_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) else: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEInitBlock(nn.Module): """ SENet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(SEInitBlock, self).__init__() mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool(x) return x class SENet(nn.Module): """ SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), num_classes=1000): super(SENet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() identity_conv3x3 = (i != 0) for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SENetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, identity_conv3x3=identity_conv3x3)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=0.2)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_senet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SENet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 16: layers = [1, 1, 1, 1] cardinality = 32 elif blocks == 28: layers = [2, 2, 2, 2] cardinality = 32 elif blocks == 40: layers = [3, 3, 3, 3] cardinality = 32 elif blocks == 52: layers = [3, 4, 6, 3] cardinality = 32 elif blocks == 103: layers = [3, 4, 23, 3] cardinality = 32 elif blocks == 154: layers = [3, 8, 36, 3] cardinality = 64 else: raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks)) bottleneck_width = 4 init_block_channels = 128 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SENet( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def senet16(**kwargs): """ SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=16, model_name="senet16", **kwargs) def senet28(**kwargs): """ SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=28, model_name="senet28", **kwargs) def senet40(**kwargs): """ SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=40, model_name="senet40", **kwargs) def senet52(**kwargs): """ SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=52, model_name="senet52", **kwargs) def senet103(**kwargs): """ SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=103, model_name="senet103", **kwargs) def senet154(**kwargs): """ SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_senet(blocks=154, model_name="senet154", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ senet16, senet28, senet40, senet52, senet103, senet154, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != senet16 or weight_count == 31366168) assert (model != senet28 or weight_count == 36453768) assert (model != senet40 or weight_count == 41541368) assert (model != senet52 or weight_count == 44659416) assert (model != senet103 or weight_count == 60963096) assert (model != senet154 or weight_count == 115088984) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
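# --- Editor's illustration (not part of the original file) ---------------
# `SEBlock` above is imported from .common. As a reading aid, here is a
# minimal standalone sketch of the squeeze-and-excitation mechanism the
# paper describes: global-average-pool the feature map, pass it through a
# small bottleneck MLP, and rescale each channel by the resulting sigmoid
# gate. `ToySEBlock` is a hypothetical name; the repo's block may differ
# in detail.
import torch
import torch.nn as nn


class ToySEBlock(nn.Module):
    """Channel recalibration: squeeze -> excite -> rescale."""
    def __init__(self, channels, reduction=16):
        super(ToySEBlock, self).__init__()
        mid_channels = channels // reduction
        self.fc1 = nn.Conv2d(channels, mid_channels, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(mid_channels, channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        w = x.mean(dim=(2, 3), keepdim=True)                 # squeeze: (B, C, 1, 1)
        w = self.sigmoid(self.fc2(self.relu(self.fc1(w))))   # excite: channel gates
        return x * w                                         # rescale input channels


_x = torch.randn(2, 64, 28, 28)
assert ToySEBlock(64)(_x).shape == _x.shape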
13,095
28.696145
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/diapreresnet_cifar.py
""" DIA-PreResNet for CIFAR/SVHN, implemented in PyTorch. Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['CIFARDIAPreResNet', 'diapreresnet20_cifar10', 'diapreresnet20_cifar100', 'diapreresnet20_svhn', 'diapreresnet56_cifar10', 'diapreresnet56_cifar100', 'diapreresnet56_svhn', 'diapreresnet110_cifar10', 'diapreresnet110_cifar100', 'diapreresnet110_svhn', 'diapreresnet164bn_cifar10', 'diapreresnet164bn_cifar100', 'diapreresnet164bn_svhn', 'diapreresnet1001_cifar10', 'diapreresnet1001_cifar100', 'diapreresnet1001_svhn', 'diapreresnet1202_cifar10', 'diapreresnet1202_cifar100', 'diapreresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3, DualPathSequential from .preresnet import PreResActivation from .diaresnet import DIAAttention from .diapreresnet import DIAPreResUnit class CIFARDIAPreResNet(nn.Module): """ DIA-PreResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARDIAPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), DIAPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False, attention=attention)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_diapreresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DIA-PreResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARDIAPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def diapreresnet20_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar10", **kwargs) def diapreresnet20_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar100", **kwargs) def diapreresnet20_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diapreresnet20_svhn", **kwargs) def diapreresnet56_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar10", **kwargs) def diapreresnet56_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar100", **kwargs) def diapreresnet56_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diapreresnet56_svhn", **kwargs) def diapreresnet110_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar10", **kwargs) def diapreresnet110_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar100", **kwargs) def diapreresnet110_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diapreresnet110_svhn", **kwargs) def diapreresnet164bn_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar10", **kwargs) def diapreresnet164bn_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar100", **kwargs) def diapreresnet164bn_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_svhn", **kwargs) def diapreresnet1001_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar10", **kwargs) def diapreresnet1001_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar100", **kwargs) def diapreresnet1001_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_svhn", **kwargs) def diapreresnet1202_cifar10(num_classes=10, **kwargs): """ DIA-PreResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_cifar10", **kwargs) def diapreresnet1202_cifar100(num_classes=100, **kwargs): """ DIA-PreResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diapreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_cifar100", **kwargs) def diapreresnet1202_svhn(num_classes=10, **kwargs): """ DIA-PreResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (diapreresnet20_cifar10, 10), (diapreresnet20_cifar100, 100), (diapreresnet20_svhn, 10), (diapreresnet56_cifar10, 10), (diapreresnet56_cifar100, 100), (diapreresnet56_svhn, 10), (diapreresnet110_cifar10, 10), (diapreresnet110_cifar100, 100), (diapreresnet110_svhn, 10), (diapreresnet164bn_cifar10, 10), (diapreresnet164bn_cifar100, 100), (diapreresnet164bn_svhn, 10), (diapreresnet1001_cifar10, 10), (diapreresnet1001_cifar100, 100), (diapreresnet1001_svhn, 10), (diapreresnet1202_cifar10, 10), (diapreresnet1202_cifar100, 100), (diapreresnet1202_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != diapreresnet20_cifar10 or weight_count == 286674) assert (model != diapreresnet20_cifar100 or weight_count == 292524) assert (model != diapreresnet20_svhn or weight_count == 286674) assert (model != diapreresnet56_cifar10 or weight_count == 869970) assert (model != diapreresnet56_cifar100 or weight_count == 875820) assert (model != diapreresnet56_svhn or weight_count == 869970) assert (model != diapreresnet110_cifar10 or weight_count == 1744914) assert (model != diapreresnet110_cifar100 or weight_count == 1750764) assert (model != diapreresnet110_svhn or weight_count == 1744914) assert (model != diapreresnet164bn_cifar10 or weight_count == 1922106) assert (model != diapreresnet164bn_cifar100 or weight_count == 1945236) assert (model != diapreresnet164bn_svhn or weight_count == 1922106) assert (model != diapreresnet1001_cifar10 or weight_count == 10546554) assert (model != diapreresnet1001_cifar100 or weight_count == 10569684) assert (model != diapreresnet1001_svhn or weight_count == 10546554) assert (model != diapreresnet1202_cifar10 or weight_count == 19438226) assert (model != diapreresnet1202_cifar100 or weight_count == 19444076) assert (model != diapreresnet1202_svhn or weight_count == 19438226) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
20,604
36.327899
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/simplepose_coco.py
""" SimplePose for COCO Keypoint, implemented in PyTorch. Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. """ __all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco', 'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco', 'simplepose_resneta152b_coco'] import os import torch import torch.nn as nn from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock from .resnet import resnet18, resnet50b, resnet101b, resnet152b from .resneta import resneta50b, resneta101b, resneta152b class SimplePose(nn.Module): """ SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. """ def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17): super(SimplePose, self).__init__() assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap self.backbone = backbone self.decoder = nn.Sequential() in_channels = backbone_out_channels for i, out_channels in enumerate(channels): self.decoder.add_module("unit{}".format(i + 1), DeconvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1)) in_channels = out_channels self.decoder.add_module("final_block", conv1x1( in_channels=in_channels, out_channels=keypoints, bias=True)) self.heatmap_max_det = HeatmapMaxDetBlock() self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.backbone(x) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_simplepose(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SimplePose model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" channels = [256, 256, 256] net = SimplePose( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=512, keypoints=keypoints, model_name="simplepose_resnet18_coco", **kwargs) def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet50b_coco", **kwargs) def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet101b_coco", **kwargs) def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnet152b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet152b_coco", **kwargs) def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resneta50b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta50b_coco", **kwargs) def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resneta101b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta101b_coco", **kwargs) def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose model on the base of ResNet(A)-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resneta152b(pretrained=pretrained_backbone).features del backbone[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta152b_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ simplepose_resnet18_coco, simplepose_resnet50b_coco, simplepose_resnet101b_coco, simplepose_resnet152b_coco, simplepose_resneta50b_coco, simplepose_resneta101b_coco, simplepose_resneta152b_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != simplepose_resnet18_coco or weight_count == 15376721) assert (model != simplepose_resnet50b_coco or weight_count == 33999697) assert (model != simplepose_resnet101b_coco or weight_count == 52991825) assert (model != simplepose_resnet152b_coco or weight_count == 68635473) assert (model != simplepose_resneta50b_coco or weight_count == 34018929) assert (model != simplepose_resneta101b_coco or weight_count == 53011057) assert (model != simplepose_resneta152b_coco or weight_count == 68654705) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
12,777
36.145349
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/vovnet.py
""" VoVNet for ImageNet-1K, implemented in PyTorch. Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. """ __all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57'] import os import torch.nn as nn from .common import conv1x1_block, conv3x3_block, SequentialConcurrent class VoVUnit(nn.Module): """ VoVNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. branch_channels : int Number of output channels for each branch. num_branches : int Number of branches. resize : bool Whether to use resize block. use_residual : bool Whether to use residual block. """ def __init__(self, in_channels, out_channels, branch_channels, num_branches, resize, use_residual): super(VoVUnit, self).__init__() self.resize = resize self.use_residual = use_residual if self.resize: self.pool = nn.MaxPool2d( kernel_size=3, stride=2, ceil_mode=True) self.branches = SequentialConcurrent() branch_in_channels = in_channels for i in range(num_branches): self.branches.add_module("branch{}".format(i + 1), conv3x3_block( in_channels=branch_in_channels, out_channels=branch_channels)) branch_in_channels = branch_channels self.concat_conv = conv1x1_block( in_channels=(in_channels + num_branches * branch_channels), out_channels=out_channels) def forward(self, x): if self.resize: x = self.pool(x) if self.use_residual: identity = x x = self.branches(x) x = self.concat_conv(x) if self.use_residual: x = x + identity return x class VoVInitBlock(nn.Module): """ VoVNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(VoVInitBlock, self).__init__() mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class VoVNet(nn.Module): """ VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- channels : list of list of int Number of output channels for each unit. branch_channels : list of list of int Number of branch output channels for each unit. num_branches : int Number of branches for the each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, branch_channels, num_branches, in_channels=3, in_size=(224, 224), num_classes=1000): super(VoVNet, self).__init__() self.in_size = in_size self.num_classes = num_classes init_block_channels = 128 self.features = nn.Sequential() self.features.add_module("init_block", VoVInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): use_residual = (j != 0) resize = (j == 0) and (i != 0) stage.add_module("unit{}".format(j + 1), VoVUnit( in_channels=in_channels, out_channels=out_channels, branch_channels=branch_channels[i][j], num_branches=num_branches, resize=resize, use_residual=use_residual)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, nn.BatchNorm2d): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_vovnet(blocks, slim=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. slim : bool, default False Whether to use a slim model. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 27: layers = [1, 1, 1, 1] elif blocks == 39: layers = [1, 1, 2, 2] elif blocks == 57: layers = [1, 1, 4, 3] else: raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 6 + 3 == blocks) num_branches = 5 channels_per_layers = [256, 512, 768, 1024] branch_channels_per_layers = [128, 160, 192, 224] if slim: channels_per_layers = [ci // 2 for ci in channels_per_layers] branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)] net = VoVNet( channels=channels, branch_channels=branch_channels, num_branches=num_branches, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def vovnet27s(**kwargs): """ VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_vovnet(blocks=27, slim=True, model_name="vovnet27s", **kwargs) def vovnet39(**kwargs): """ VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vovnet(blocks=39, model_name="vovnet39", **kwargs) def vovnet57(**kwargs): """ VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vovnet(blocks=57, model_name="vovnet57", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ vovnet27s, vovnet39, vovnet57, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != vovnet27s or weight_count == 3525736) assert (model != vovnet39 or weight_count == 22600296) assert (model != vovnet57 or weight_count == 36640296) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
10,220
29.601796
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/espnetv2.py
""" ESPNetv2 for ImageNet-1K, implemented in PyTorch. Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. """ __all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2'] import os import math import torch import torch.nn as nn import torch.nn.init as init from .common import conv3x3, conv1x1_block, conv3x3_block, DualPathSequential class PreActivation(nn.Module): """ PreResNet like pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PreActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.PReLU(num_parameters=in_channels) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class ShortcutBlock(nn.Module): """ ESPNetv2 shortcut block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShortcutBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, activation=(lambda: nn.PReLU(in_channels))) self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class HierarchicalConcurrent(nn.Sequential): """ A container for hierarchical concatenation of modules on the base of the sequential container. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. """ def __init__(self, axis=1): super(HierarchicalConcurrent, self).__init__() self.axis = axis def forward(self, x): out = [] y_prev = None for module in self._modules.values(): y = module(x) if y_prev is not None: y += y_prev out.append(y) y_prev = y out = torch.cat(tuple(out), dim=self.axis) return out class ESPBlock(nn.Module): """ ESPNetv2 block (so-called EESP block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the branch convolution layers. dilations : list of int Dilation values for branches. """ def __init__(self, in_channels, out_channels, stride, dilations): super(ESPBlock, self).__init__() num_branches = len(dilations) assert (out_channels % num_branches == 0) self.downsample = (stride != 1) mid_channels = out_channels // num_branches self.reduce_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, groups=num_branches, activation=(lambda: nn.PReLU(mid_channels))) self.branches = HierarchicalConcurrent() for i in range(num_branches): self.branches.add_module("branch{}".format(i + 1), conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, padding=dilations[i], dilation=dilations[i], groups=mid_channels)) self.merge_conv = conv1x1_block( in_channels=out_channels, out_channels=out_channels, groups=num_branches, activation=None) self.preactiv = PreActivation(in_channels=out_channels) if not self.downsample: self.activ = nn.PReLU(out_channels) def forward(self, x, x0): y = self.reduce_conv(x) y = self.branches(y) y = self.preactiv(y) y = self.merge_conv(y) if not self.downsample: y = y + x y = self.activ(y) return y, x0 class DownsampleBlock(nn.Module): """ ESPNetv2 downsample block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
x0_channels : int
        Number of input channels for shortcut.
    dilations : list of int
        Dilation values for branches in EESP block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 x0_channels,
                 dilations):
        super(DownsampleBlock, self).__init__()
        inc_channels = out_channels - in_channels

        self.pool = nn.AvgPool2d(
            kernel_size=3,
            stride=2,
            padding=1)
        self.eesp = ESPBlock(
            in_channels=in_channels,
            out_channels=inc_channels,
            stride=2,
            dilations=dilations)
        self.shortcut_block = ShortcutBlock(
            in_channels=x0_channels,
            out_channels=out_channels)
        self.activ = nn.PReLU(out_channels)

    def forward(self, x, x0):
        y1 = self.pool(x)
        y2, _ = self.eesp(x, None)
        x = torch.cat((y1, y2), dim=1)
        x0 = self.pool(x0)
        y3 = self.shortcut_block(x0)
        x = x + y3
        x = self.activ(x)
        return x, x0


class ESPInitBlock(nn.Module):
    """
    ESPNetv2 initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ESPInitBlock, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            activation=(lambda: nn.PReLU(out_channels)))
        self.pool = nn.AvgPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x, x0):
        x = self.conv(x)
        x0 = self.pool(x0)
        return x, x0


class ESPFinalBlock(nn.Module):
    """
    ESPNetv2 final block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    final_groups : int
        Number of groups in the last convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 final_groups):
        super(ESPFinalBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            groups=in_channels,
            activation=(lambda: nn.PReLU(in_channels)))
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=final_groups,
            activation=(lambda: nn.PReLU(out_channels)))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x


class ESPNetv2(nn.Module):
    """
    ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    final_block_groups : int
        Number of groups for the final unit.
    dilations : list of list of list of int
        Dilation values for branches in each unit.
    dropout_rate : float, default 0.2
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
""" def __init__(self, channels, init_block_channels, final_block_channels, final_block_groups, dilations, dropout_rate=0.2, in_channels=3, in_size=(224, 224), num_classes=1000): super(ESPNetv2, self).__init__() self.in_size = in_size self.num_classes = num_classes x0_channels = in_channels self.features = DualPathSequential( return_two=False, first_ordinals=0, last_ordinals=2) self.features.add_module("init_block", ESPInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() for j, out_channels in enumerate(channels_per_stage): if j == 0: unit = DownsampleBlock( in_channels=in_channels, out_channels=out_channels, x0_channels=x0_channels, dilations=dilations[i][j]) else: unit = ESPBlock( in_channels=in_channels, out_channels=out_channels, stride=1, dilations=dilations[i][j]) stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", ESPFinalBlock( in_channels=in_channels, out_channels=final_block_channels, final_groups=final_block_groups)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x, x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_espnetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ESPNetv2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (width_scale <= 2.0) branches = 4 layers = [1, 4, 8, 4] max_dilation_list = [6, 5, 4, 3, 2] max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1) for (i, li) in enumerate(layers)] dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations] base_channels = 32 weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))] init_block_channels = base_channels if weighed_base_channels > base_channels else weighed_base_channels final_block_channels = 1024 if width_scale <= 1.5 else 1280 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ESPNetv2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, final_block_groups=branches, dilations=dilations, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def espnetv2_wd2(**kwargs): """ ESPNetv2 x0.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=0.5, model_name="espnetv2_wd2", **kwargs) def espnetv2_w1(**kwargs): """ ESPNetv2 x1.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.0, model_name="espnetv2_w1", **kwargs) def espnetv2_w5d4(**kwargs): """ ESPNetv2 x1.25 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.25, model_name="espnetv2_w5d4", **kwargs) def espnetv2_w3d2(**kwargs): """ ESPNetv2 x1.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs) def espnetv2_w2(**kwargs): """ ESPNetv2 x2.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ espnetv2_wd2, espnetv2_w1, espnetv2_w5d4, espnetv2_w3d2, espnetv2_w2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) # assert (model != espnetv2_wd2 or weight_count == 1241332) # assert (model != espnetv2_w1 or weight_count == 1670072) # assert (model != espnetv2_w5d4 or weight_count == 1965440) # assert (model != espnetv2_w3d2 or weight_count == 2314856) # assert (model != espnetv2_w2 or weight_count == 3498136) assert (model != espnetv2_wd2 or weight_count == 1241092) assert (model != espnetv2_w1 or weight_count == 1669592) assert (model != espnetv2_w5d4 or weight_count == 1964832) assert (model != espnetv2_w3d2 or weight_count == 2314120) assert (model != espnetv2_w2 or weight_count == 3497144) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
17,203
30.336976
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/shufflenet.py
""" ShuffleNet for ImageNet-1K, implemented in PyTorch. Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. """ __all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1', 'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2', 'shufflenet_g1_wd4', 'shufflenet_g3_wd4'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle class ShuffleUnit(nn.Module): """ ShuffleNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. """ def __init__(self, in_channels, out_channels, groups, downsample, ignore_group): super(ShuffleUnit, self).__init__() self.downsample = downsample mid_channels = out_channels // 4 if downsample: out_channels -= in_channels self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups)) self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, stride=(2 if self.downsample else 1)) self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels) self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups) self.expand_bn3 = nn.BatchNorm2d(num_features=out_channels) if downsample: self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.compress_conv1(x) x = self.compress_bn1(x) x = self.activ(x) x = self.c_shuffle(x) x = self.dw_conv2(x) x = self.dw_bn2(x) x = self.expand_conv3(x) x = self.expand_bn3(x) if self.downsample: identity = self.avgpool(identity) x = torch.cat((x, identity), dim=1) else: x = x + identity x = self.activ(x) return x class ShuffleInitBlock(nn.Module): """ ShuffleNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShuffleInitBlock, self).__init__() self.conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=2) self.bn = nn.BatchNorm2d(num_features=out_channels) self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class ShuffleNet(nn.Module): """ ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), num_classes=1000): super(ShuffleNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) ignore_group = (i == 0) and (j == 0) stage.add_module("unit{}".format(j + 1), ShuffleUnit( in_channels=in_channels, out_channels=out_channels, groups=groups, downsample=downsample, ignore_group=ignore_group)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_shufflenet(groups, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ShuffleNet model with specific parameters. Parameters: ---------- groups : int Number of groups in convolution layers. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 24 layers = [4, 8, 4] if groups == 1: channels_per_layers = [144, 288, 576] elif groups == 2: channels_per_layers = [200, 400, 800] elif groups == 3: channels_per_layers = [240, 480, 960] elif groups == 4: channels_per_layers = [272, 544, 1088] elif groups == 8: channels_per_layers = [384, 768, 1536] else: raise ValueError("The {} of groups is not supported".format(groups)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) net = ShuffleNet( channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shufflenet_g1_w1(**kwargs): """ ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs) def shufflenet_g2_w1(**kwargs): """ ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs) def shufflenet_g3_w1(**kwargs): """ ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs) def shufflenet_g4_w1(**kwargs): """ ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs) def shufflenet_g8_w1(**kwargs): """ ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs) def shufflenet_g1_w3d4(**kwargs): """ ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs) def shufflenet_g3_w3d4(**kwargs): """ ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs) def shufflenet_g1_wd2(**kwargs): """ ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs) def shufflenet_g3_wd2(**kwargs): """ ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs) def shufflenet_g1_wd4(**kwargs): """ ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs) def shufflenet_g3_wd4(**kwargs): """ ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ shufflenet_g1_w1, shufflenet_g2_w1, shufflenet_g3_w1, shufflenet_g4_w1, shufflenet_g8_w1, shufflenet_g1_w3d4, shufflenet_g3_w3d4, shufflenet_g1_wd2, shufflenet_g3_wd2, shufflenet_g1_wd4, shufflenet_g3_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenet_g1_w1 or weight_count == 1531936) assert (model != shufflenet_g2_w1 or weight_count == 1733848) assert (model != shufflenet_g3_w1 or weight_count == 1865728) assert (model != shufflenet_g4_w1 or weight_count == 1968344) assert (model != shufflenet_g8_w1 or weight_count == 2434768) assert (model != shufflenet_g1_w3d4 or weight_count == 975214) assert (model != shufflenet_g3_w3d4 or weight_count == 1238266) assert (model != shufflenet_g1_wd2 or weight_count == 534484) assert (model != shufflenet_g3_wd2 or weight_count == 718324) assert (model != shufflenet_g1_wd4 or weight_count == 209746) assert (model != shufflenet_g3_wd4 or weight_count == 305902) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
15,779
31.875
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/bamresnet.py
""" BAM-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. """ __all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block from .resnet import ResInitBlock, ResUnit class DenseBlock(nn.Module): """ Standard dense block with Batch normalization and ReLU activation. Parameters: ---------- in_features : int Number of input features. out_features : int Number of output features. """ def __init__(self, in_features, out_features): super(DenseBlock, self).__init__() self.fc = nn.Linear( in_features=in_features, out_features=out_features) self.bn = nn.BatchNorm1d(num_features=out_features) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.fc(x) x = self.bn(x) x = self.activ(x) return x class ChannelGate(nn.Module): """ BAM channel gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_layers : int, default 1 Number of dense blocks. """ def __init__(self, channels, reduction_ratio=16, num_layers=1): super(ChannelGate, self).__init__() mid_channels = channels // reduction_ratio self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.init_fc = DenseBlock( in_features=channels, out_features=mid_channels) self.main_fcs = nn.Sequential() for i in range(num_layers - 1): self.main_fcs.add_module("fc{}".format(i + 1), DenseBlock( in_features=mid_channels, out_features=mid_channels)) self.final_fc = nn.Linear( in_features=mid_channels, out_features=channels) def forward(self, x): input = x x = self.pool(x) x = x.view(x.size(0), -1) x = self.init_fc(x) x = self.main_fcs(x) x = self.final_fc(x) x = x.unsqueeze(2).unsqueeze(3).expand_as(input) return x class SpatialGate(nn.Module): """ BAM spatial gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_dil_convs : int, default 2 Number of dilated convolutions. dilation : int, default 4 Dilation/padding value for corresponding convolutions. """ def __init__(self, channels, reduction_ratio=16, num_dil_convs=2, dilation=4): super(SpatialGate, self).__init__() mid_channels = channels // reduction_ratio self.init_conv = conv1x1_block( in_channels=channels, out_channels=mid_channels, stride=1, bias=True) self.dil_convs = nn.Sequential() for i in range(num_dil_convs): self.dil_convs.add_module("conv{}".format(i + 1), conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=1, padding=dilation, dilation=dilation, bias=True)) self.final_conv = conv1x1( in_channels=mid_channels, out_channels=1, stride=1, bias=True) def forward(self, x): input = x x = self.init_conv(x) x = self.dil_convs(x) x = self.final_conv(x) x = x.expand_as(input) return x class BamBlock(nn.Module): """ BAM attention block for BAM-ResNet. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels): super(BamBlock, self).__init__() self.ch_att = ChannelGate(channels=channels) self.sp_att = SpatialGate(channels=channels) self.sigmoid = nn.Sigmoid() def forward(self, x): att = 1 + self.sigmoid(self.ch_att(x) * self.sp_att(x)) x = x * att return x class BamResUnit(nn.Module): """ BAM-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck):
        super(BamResUnit, self).__init__()
        self.use_bam = (stride != 1)

        if self.use_bam:
            self.bam = BamBlock(channels=in_channels)
        self.res_unit = ResUnit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bottleneck=bottleneck,
            conv1_stride=False)

    def forward(self, x):
        if self.use_bam:
            x = self.bam(x)
        x = self.res_unit(x)
        return x


class BamResNet(nn.Module):
    """
    BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(BamResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), BamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=bottleneck))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x


def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create BAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
""" if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = BamResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def bam_resnet18(**kwargs): """ BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="bam_resnet18", **kwargs) def bam_resnet34(**kwargs): """ BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="bam_resnet34", **kwargs) def bam_resnet50(**kwargs): """ BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="bam_resnet50", **kwargs) def bam_resnet101(**kwargs): """ BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="bam_resnet101", **kwargs) def bam_resnet152(**kwargs): """ BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=152, model_name="bam_resnet152", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ bam_resnet18, bam_resnet34, bam_resnet50, bam_resnet101, bam_resnet152, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != bam_resnet18 or weight_count == 11712503) assert (model != bam_resnet34 or weight_count == 21820663) assert (model != bam_resnet50 or weight_count == 25915099) assert (model != bam_resnet101 or weight_count == 44907227) assert (model != bam_resnet152 or weight_count == 60550875) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
13,297
28.420354
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resattnet.py
""" ResAttNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. """ __all__ = ['ResAttNet', 'resattnet56', 'resattnet92', 'resattnet128', 'resattnet164', 'resattnet200', 'resattnet236', 'resattnet452'] import os import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import conv1x1, conv7x7_block, pre_conv1x1_block, pre_conv3x3_block, Hourglass class PreResBottleneck(nn.Module): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PreResBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x, x_pre_activ class ResBlock(nn.Module): """ Residual block with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride=1): super(ResBlock, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def forward(self, x): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class InterpolationBlock(nn.Module): """ Interpolation block. Parameters: ---------- scale_factor : float Multiplier for spatial size. """ def __init__(self, scale_factor): super(InterpolationBlock, self).__init__() self.scale_factor = scale_factor def forward(self, x): return F.interpolate( input=x, scale_factor=self.scale_factor, mode="bilinear", align_corners=True) class DoubleSkipBlock(nn.Module): """ Double skip connection block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DoubleSkipBlock, self).__init__() self.skip1 = ResBlock( in_channels=in_channels, out_channels=out_channels) def forward(self, x): x = x + self.skip1(x) return x class ResBlockSequence(nn.Module): """ Sequence of residual blocks with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of sequence. """ def __init__(self, in_channels, out_channels, length): super(ResBlockSequence, self).__init__() self.blocks = nn.Sequential() for i in range(length): self.blocks.add_module("block{}".format(i + 1), ResBlock( in_channels=in_channels, out_channels=out_channels)) def forward(self, x): x = self.blocks(x) return x class DownAttBlock(nn.Module): """ Down sub-block for hourglass of attention block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of residual blocks list. """ def __init__(self, in_channels, out_channels, length): super(DownAttBlock, self).__init__() self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) self.res_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=length) def forward(self, x): x = self.pool(x) x = self.res_blocks(x) return x class UpAttBlock(nn.Module): """ Up sub-block for hourglass of attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. length : int Length of residual blocks list. scale_factor : float Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, length, scale_factor): super(UpAttBlock, self).__init__() self.res_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=length) self.upsample = InterpolationBlock(scale_factor) def forward(self, x): x = self.res_blocks(x) x = self.upsample(x) return x class MiddleAttBlock(nn.Module): """ Middle sub-block for attention block. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels): super(MiddleAttBlock, self).__init__() self.conv1 = pre_conv1x1_block( in_channels=channels, out_channels=channels) self.conv2 = pre_conv1x1_block( in_channels=channels, out_channels=channels) self.sigmoid = nn.Sigmoid() def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.sigmoid(x) return x class AttBlock(nn.Module): """ Attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. hourglass_depth : int Depth of hourglass block. att_scales : list of int Attention block specific scales. """ def __init__(self, in_channels, out_channels, hourglass_depth, att_scales): super(AttBlock, self).__init__() assert (len(att_scales) == 3) scale_factor = 2 scale_p, scale_t, scale_r = att_scales self.init_blocks = ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=scale_p) down_seq = nn.Sequential() up_seq = nn.Sequential() skip_seq = nn.Sequential() for i in range(hourglass_depth): down_seq.add_module("down{}".format(i + 1), DownAttBlock( in_channels=in_channels, out_channels=out_channels, length=scale_r)) up_seq.add_module("up{}".format(i + 1), UpAttBlock( in_channels=in_channels, out_channels=out_channels, length=scale_r, scale_factor=scale_factor)) if i == 0: skip_seq.add_module("skip1", ResBlockSequence( in_channels=in_channels, out_channels=out_channels, length=scale_t)) else: skip_seq.add_module("skip{}".format(i + 1), DoubleSkipBlock( in_channels=in_channels, out_channels=out_channels)) self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, return_first_skip=True) self.middle_block = MiddleAttBlock(channels=out_channels) self.final_block = ResBlock( in_channels=in_channels, out_channels=out_channels) def forward(self, x): x = self.init_blocks(x) x, y = self.hg(x) x = self.middle_block(x) x = (1 + x) * y x = self.final_block(x) return x class ResAttInitBlock(nn.Module): """ ResAttNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(ResAttInitBlock, self).__init__() self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class PreActivation(nn.Module): """ Pre-activation block without convolution layer. It's used by itself as the final block in PreResNet. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PreActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class ResAttNet(nn.Module): """ ResAttNet model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. attentions : list of list of int Whether to use a attention unit or residual one. att_scales : list of int Attention block specific scales. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, attentions, att_scales, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResAttNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResAttInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): hourglass_depth = len(channels) - 1 - i stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 if attentions[i][j]: stage.add_module("unit{}".format(j + 1), AttBlock( in_channels=in_channels, out_channels=out_channels, hourglass_depth=hourglass_depth, att_scales=att_scales)) else: stage.add_module("unit{}".format(j + 1), ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resattnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResAttNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 56: att_layers = [1, 1, 1] att_scales = [1, 2, 1] elif blocks == 92: att_layers = [1, 2, 3] att_scales = [1, 2, 1] elif blocks == 128: att_layers = [2, 3, 4] att_scales = [1, 2, 1] elif blocks == 164: att_layers = [3, 4, 5] att_scales = [1, 2, 1] elif blocks == 200: att_layers = [4, 5, 6] att_scales = [1, 2, 1] elif blocks == 236: att_layers = [5, 6, 7] att_scales = [1, 2, 1] elif blocks == 452: att_layers = [5, 6, 7] att_scales = [2, 4, 3] else: raise ValueError("Unsupported ResAttNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] layers = att_layers + [2] channels = [[ci] * (li + 1) for (ci, li) in zip(channels_per_layers, layers)] attentions = [[0] + [1] * li for li in att_layers] + [[0] * 3] net = ResAttNet( channels=channels, init_block_channels=init_block_channels, attentions=attentions, att_scales=att_scales, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resattnet56(**kwargs): """ ResAttNet-56 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=56, model_name="resattnet56", **kwargs) def resattnet92(**kwargs): """ ResAttNet-92 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=92, model_name="resattnet92", **kwargs) def resattnet128(**kwargs): """ ResAttNet-128 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=128, model_name="resattnet128", **kwargs) def resattnet164(**kwargs): """ ResAttNet-164 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=164, model_name="resattnet164", **kwargs) def resattnet200(**kwargs): """ ResAttNet-200 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=200, model_name="resattnet200", **kwargs) def resattnet236(**kwargs): """ ResAttNet-236 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=236, model_name="resattnet236", **kwargs) def resattnet452(**kwargs): """ ResAttNet-452 model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resattnet(blocks=452, model_name="resattnet452", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resattnet56, resattnet92, resattnet128, resattnet164, resattnet200, resattnet236, resattnet452, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resattnet56 or weight_count == 31810728) assert (model != resattnet92 or weight_count == 52466344) assert (model != resattnet128 or weight_count == 65294504) assert (model != resattnet164 or weight_count == 78122664) assert (model != resattnet200 or weight_count == 90950824) assert (model != resattnet236 or weight_count == 103778984) assert (model != resattnet452 or weight_count == 182285224) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
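# --- Editor's illustrative sketch (not from the original file) ---
# Shows how get_resattnet() expands `att_layers` into per-stage unit layouts for
# blocks=56: every stage opens with a residual unit (0) followed by attention
# units (1), and a final stage of residual units is appended.
def _resattnet56_config_sketch():
    att_layers = [1, 1, 1]  # selected by blocks == 56
    channels_per_layers = [256, 512, 1024, 2048]
    layers = att_layers + [2]
    channels = [[ci] * (li + 1) for (ci, li) in zip(channels_per_layers, layers)]
    attentions = [[0] + [1] * li for li in att_layers] + [[0] * 3]
    assert channels == [[256, 256], [512, 512], [1024, 1024], [2048, 2048, 2048]]
    assert attentions == [[0, 1], [0, 1], [0, 1], [0, 0, 0]]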
20,035
28.464706
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/centernet.py
""" CenterNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850. """ __all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc', 'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco', 'CenterNetHeatmapMaxDet'] import os import torch import torch.nn as nn from .common import conv1x1, conv3x3_block, DeconvBlock, Concurrent from .resnet import resnet18, resnet50b, resnet101b class CenterNetDecoderUnit(nn.Module): """ CenterNet decoder unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(CenterNetDecoderUnit, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=True) self.deconv = DeconvBlock( in_channels=out_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.deconv(x) return x class CenterNetHeadBlock(nn.Module): """ CenterNet simple head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(CenterNetHeadBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, bias=True, use_bn=False) self.conv2 = conv1x1( in_channels=in_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class CenterNetHeatmapBlock(nn.Module): """ CenterNet heatmap block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. do_nms : bool Whether do NMS (or simply clip for training otherwise). """ def __init__(self, in_channels, out_channels, do_nms): super(CenterNetHeatmapBlock, self).__init__() self.do_nms = do_nms self.head = CenterNetHeadBlock( in_channels=in_channels, out_channels=out_channels) self.sigmoid = nn.Sigmoid() if self.do_nms: self.pool = nn.MaxPool2d( kernel_size=3, stride=1, padding=1) def forward(self, x): x = self.head(x) x = self.sigmoid(x) if self.do_nms: y = self.pool(x) x = x * (y == x) else: eps = 1e-4 x = x.clamp(min=eps, max=(1.0 - eps)) return x class CenterNetHeatmapMaxDet(nn.Module): """ CenterNet decoder for heads (heatmap, wh, reg). Parameters: ---------- topk : int, default 40 Keep only `topk` detections. scale : int, default is 4 Downsampling scale factor. 
""" def __init__(self, topk=40, scale=4): super(CenterNetHeatmapMaxDet, self).__init__() self.topk = topk self.scale = scale def forward(self, x): heatmap = x[:, :-4] wh = x[:, -4:-2] reg = x[:, -2:] batch, _, out_h, out_w = heatmap.shape scores, indices = heatmap.view((batch, -1)).topk(k=self.topk) topk_classes = (indices / (out_h * out_w)).type(torch.float32) topk_indices = indices.fmod(out_h * out_w) topk_ys = (topk_indices / out_w).type(torch.float32) topk_xs = topk_indices.fmod(out_w).type(torch.float32) center = reg.permute(0, 2, 3, 1).view((batch, -1, 2)) wh = wh.permute(0, 2, 3, 1).view((batch, -1, 2)) xs = torch.gather(center[:, :, 0], dim=-1, index=topk_indices) ys = torch.gather(center[:, :, 1], dim=-1, index=topk_indices) topk_xs = topk_xs + xs topk_ys = topk_ys + ys w = torch.gather(wh[:, :, 0], dim=-1, index=topk_indices) h = torch.gather(wh[:, :, 1], dim=-1, index=topk_indices) half_w = 0.5 * w half_h = 0.5 * h bboxes = torch.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), dim=-1) bboxes = bboxes * self.scale topk_classes = topk_classes.unsqueeze(dim=-1) scores = scores.unsqueeze(dim=-1) result = torch.cat((bboxes, topk_classes, scores), dim=-1) return result def __repr__(self): s = "{name}(topk={topk}, scale={scale})" return s.format( name=self.__class__.__name__, topk=self.topk, scale=self.scale) def calc_flops(self, x): assert (x.shape[0] == 1) num_flops = 10 * x.size num_macs = 0 return num_flops, num_macs class CenterNet(nn.Module): """ CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. topk : int, default 40 Keep only `topk` detections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (512, 512) Spatial size of the expected input image. num_classes : int, default 80 Number of classification classes. 
""" def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, topk=40, in_channels=3, in_size=(512, 512), num_classes=80): super(CenterNet, self).__init__() self.in_size = in_size self.in_channels = in_channels self.return_heatmap = return_heatmap self.backbone = backbone self.decoder = nn.Sequential() in_channels = backbone_out_channels for i, out_channels in enumerate(channels): self.decoder.add_module("unit{}".format(i + 1), CenterNetDecoderUnit( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels heads = Concurrent() heads.add_module("heapmap_block", CenterNetHeatmapBlock( in_channels=in_channels, out_channels=num_classes, do_nms=(not self.return_heatmap))) heads.add_module("wh_block", CenterNetHeadBlock( in_channels=in_channels, out_channels=2)) heads.add_module("reg_block", CenterNetHeadBlock( in_channels=in_channels, out_channels=2)) self.decoder.add_module("heads", heads) if not self.return_heatmap: self.heatmap_max_det = CenterNetHeatmapMaxDet( topk=topk, scale=4) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.backbone(x) x = self.decoder(x) if not self.return_heatmap: x = self.heatmap_max_det(x) return x def get_centernet(backbone, backbone_out_channels, num_classes, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create CenterNet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. num_classes : int Number of classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. Returns: ------- nn.Module A network. """ channels = [256, 128, 64] net = CenterNet( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def centernet_resnet18_voc(pretrained_backbone=False, num_classes=20, **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=512, num_classes=num_classes, model_name="centernet_resnet18_voc", **kwargs) def centernet_resnet18_coco(pretrained_backbone=False, num_classes=80, **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=512, num_classes=num_classes, model_name="centernet_resnet18_coco", **kwargs) def centernet_resnet50b_voc(pretrained_backbone=False, num_classes=20, **kwargs): """ CenterNet model on the base of ResNet-50b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes, model_name="centernet_resnet50b_voc", **kwargs) def centernet_resnet50b_coco(pretrained_backbone=False, num_classes=80, **kwargs): """ CenterNet model on the base of ResNet-50b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes, model_name="centernet_resnet50b_coco", **kwargs) def centernet_resnet101b_voc(pretrained_backbone=False, num_classes=20, **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 20 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes, model_name="centernet_resnet101b_voc", **kwargs) def centernet_resnet101b_coco(pretrained_backbone=False, num_classes=80, **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 80 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnet101b(pretrained=pretrained_backbone).features del backbone[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, num_classes=num_classes, model_name="centernet_resnet101b_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (512, 512) topk = 40 return_heatmap = False pretrained = False models = [ (centernet_resnet18_voc, 20), (centernet_resnet18_coco, 80), (centernet_resnet50b_voc, 20), (centernet_resnet50b_coco, 80), (centernet_resnet101b_voc, 20), (centernet_resnet101b_coco, 80), ] for model, classes in models: net = model(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != centernet_resnet18_voc or weight_count == 14215640) assert (model != centernet_resnet18_coco or weight_count == 14219540) assert (model != centernet_resnet50b_voc or weight_count == 30086104) assert (model != centernet_resnet50b_coco or weight_count == 30090004) assert (model != centernet_resnet101b_voc or weight_count == 49078232) assert (model != centernet_resnet101b_coco or weight_count == 49082132) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert (y.shape[0] == batch) if return_heatmap: assert (y.shape[1] == classes + 4) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4) else: assert (y.shape[1] == topk) and (y.shape[2] == 6) if __name__ == "__main__": _test()
16,535
32.204819
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/xdensenet_cifar.py
""" X-DenseNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. """ __all__ = ['CIFARXDenseNet', 'xdensenet40_2_k24_bc_cifar10', 'xdensenet40_2_k24_bc_cifar100', 'xdensenet40_2_k24_bc_svhn', 'xdensenet40_2_k36_bc_cifar10', 'xdensenet40_2_k36_bc_cifar100', 'xdensenet40_2_k36_bc_svhn'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv3x3 from .preresnet import PreResActivation from .densenet import TransitionBlock from .xdensenet import pre_xconv3x3_block, XDenseUnit class XDenseSimpleUnit(nn.Module): """ X-DenseNet simple unit for CIFAR. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. expand_ratio : int Ratio of expansion. """ def __init__(self, in_channels, out_channels, dropout_rate, expand_ratio): super(XDenseSimpleUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) inc_channels = out_channels - in_channels self.conv = pre_xconv3x3_block( in_channels=in_channels, out_channels=inc_channels, expand_ratio=expand_ratio) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): identity = x x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = torch.cat((identity, x), dim=1) return x class CIFARXDenseNet(nn.Module): """ X-DenseNet model for CIFAR from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. expand_ratio : int, default 2 Ratio of expansion. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, expand_ratio=2, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARXDenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes unit_class = XDenseUnit if bottleneck else XDenseSimpleUnit self.features = nn.Sequential() self.features.add_module("init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), unit_class( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, expand_ratio=expand_ratio)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_xdensenet_cifar(num_classes, blocks, growth_rate, bottleneck, expand_ratio=2, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create X-DenseNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. growth_rate : int Growth rate. bottleneck : bool Whether to use a bottleneck or simple block in units. expand_ratio : int, default 2 Ratio of expansion. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 else: assert ((blocks - 4) % 3 == 0) layers = [(blocks - 4) // 3] * 3 init_block_channels = 2 * growth_rate from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = CIFARXDenseNet( channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, bottleneck=bottleneck, expand_ratio=expand_ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def xdensenet40_2_k24_bc_cifar10(num_classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_cifar10", **kwargs) def xdensenet40_2_k24_bc_cifar100(num_classes=100, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_cifar100", **kwargs) def xdensenet40_2_k24_bc_svhn(num_classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=24) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="xdensenet40_2_k24_bc_svhn", **kwargs) def xdensenet40_2_k36_bc_cifar10(num_classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_cifar10", **kwargs) def xdensenet40_2_k36_bc_cifar100(num_classes=100, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_cifar100", **kwargs) def xdensenet40_2_k36_bc_svhn(num_classes=10, **kwargs): """ X-DenseNet-BC-40-2 (k=36) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="xdensenet40_2_k36_bc_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (xdensenet40_2_k24_bc_cifar10, 10), (xdensenet40_2_k24_bc_cifar100, 100), (xdensenet40_2_k24_bc_svhn, 10), (xdensenet40_2_k36_bc_cifar10, 10), (xdensenet40_2_k36_bc_cifar100, 100), (xdensenet40_2_k36_bc_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != xdensenet40_2_k24_bc_cifar10 or weight_count == 690346) assert (model != xdensenet40_2_k24_bc_cifar100 or weight_count == 714196) assert (model != xdensenet40_2_k24_bc_svhn or weight_count == 690346) assert (model != xdensenet40_2_k36_bc_cifar10 or weight_count == 1542682) assert (model != xdensenet40_2_k36_bc_cifar100 or weight_count == 1578412) assert (model != xdensenet40_2_k36_bc_svhn or weight_count == 1542682) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
12,852
33.831978
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/revnet.py
""" RevNet for ImageNet-1K, implemented in PyTorch. Original paper: 'The Reversible Residual Network: Backpropagation Without Storing Activations,' https://arxiv.org/abs/1707.04585. """ __all__ = ['RevNet', 'revnet38', 'revnet110', 'revnet164'] import os from contextlib import contextmanager import torch import torch.nn as nn import torch.nn.init as init from torch.autograd import Variable from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, pre_conv1x1_block, pre_conv3x3_block use_context_mans = int( torch.__version__[0]) * 100 + int(torch.__version__[2]) - (1 if 'a' in torch.__version__ else 0) > 3 @contextmanager def set_grad_enabled(grad_mode): if not use_context_mans: yield else: with torch.set_grad_enabled(grad_mode) as c: yield [c] class ReversibleBlockFunction(torch.autograd.Function): """ RevNet reversible block function. """ @staticmethod def forward(ctx, x, fm, gm, *params): with torch.no_grad(): x1, x2 = torch.chunk(x, chunks=2, dim=1) x1 = x1.contiguous() x2 = x2.contiguous() y1 = x1 + fm(x2) y2 = x2 + gm(y1) y = torch.cat((y1, y2), dim=1) x1.set_() x2.set_() y1.set_() y2.set_() del x1, x2, y1, y2 ctx.save_for_backward(x, y) ctx.fm = fm ctx.gm = gm return y @staticmethod def backward(ctx, grad_y): fm = ctx.fm gm = ctx.gm x, y = ctx.saved_variables y1, y2 = torch.chunk(y, chunks=2, dim=1) y1 = y1.contiguous() y2 = y2.contiguous() with torch.no_grad(): y1_z = Variable(y1.data, requires_grad=True) x2 = y2 - gm(y1_z) x1 = y1 - fm(x2) with set_grad_enabled(True): x1_ = Variable(x1.data, requires_grad=True) x2_ = Variable(x2.data, requires_grad=True) y1_ = x1_ + fm.forward(x2_) y2_ = x2_ + gm(y1_) y = torch.cat((y1_, y2_), dim=1) dd = torch.autograd.grad(y, (x1_, x2_) + tuple(gm.parameters()) + tuple(fm.parameters()), grad_y) gm_params_len = len([p for p in gm.parameters()]) gm_params_grads = dd[2:2 + gm_params_len] fm_params_grads = dd[2 + gm_params_len:] grad_x = torch.cat((dd[0], dd[1]), dim=1) y1_.detach_() y2_.detach_() del y1_, y2_ x.data.set_(torch.cat((x1, x2), dim=1).data.contiguous()) return (grad_x, None, None) + fm_params_grads + gm_params_grads class ReversibleBlock(nn.Module): """ RevNet reversible block. Parameters: ---------- fm : nn.Module Fm-function. gm : nn.Module Gm-function. """ def __init__(self, fm, gm): super(ReversibleBlock, self).__init__() self.gm = gm self.fm = fm self.rev_funct = ReversibleBlockFunction.apply def forward(self, x): assert (x.shape[1] % 2 == 0) params = [w for w in self.fm.parameters()] + [w for w in self.gm.parameters()] y = self.rev_funct(x, self.fm, self.gm, *params) x.data.set_() return y def inverse(self, y): assert (y.shape[1] % 2 == 0) y1, y2 = torch.chunk(y, chunks=2, dim=1) y1 = y1.contiguous() y2 = y2.contiguous() x2 = y2 - self.gm(y1) x1 = y1 - self.fm(x2) x = torch.cat((x1, x2), dim=1) return x class RevResBlock(nn.Module): """ Simple RevNet block for residual path in RevNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. preactivate : bool Whether use pre-activation for the first convolution block. 
""" def __init__(self, in_channels, out_channels, stride, preactivate): super(RevResBlock, self).__init__() if preactivate: self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.conv1 = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.conv2 = pre_conv3x3_block( in_channels=out_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class RevResBottleneck(nn.Module): """ RevNet bottleneck block for residual path in RevNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. preactivate : bool Whether use pre-activation for the first convolution block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, preactivate, bottleneck_factor=4): super(RevResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor if preactivate: self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) else: self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class RevUnit(nn.Module): """ RevNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. preactivate : bool Whether use pre-activation for the first convolution block. """ def __init__(self, in_channels, out_channels, stride, bottleneck, preactivate): super(RevUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) body_class = RevResBottleneck if bottleneck else RevResBlock if (not self.resize_identity) and (stride == 1): assert (in_channels % 2 == 0) assert (out_channels % 2 == 0) in_channels2 = in_channels // 2 out_channels2 = out_channels // 2 gm = body_class( in_channels=in_channels2, out_channels=out_channels2, stride=1, preactivate=preactivate) fm = body_class( in_channels=in_channels2, out_channels=out_channels2, stride=1, preactivate=preactivate) self.body = ReversibleBlock(gm, fm) else: self.body = body_class( in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) x = self.body(x) x = x + identity else: x = self.body(x) return x class RevPostActivation(nn.Module): """ RevNet specific post-activation block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(RevPostActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class RevNet(nn.Module): """ RevNet model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,' https://arxiv.org/abs/1707.04585. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), num_classes=1000): super(RevNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 preactivate = (j != 0) or (i != 0) stage.add_module("unit{}".format(j + 1), RevUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, preactivate=preactivate)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_postactiv", RevPostActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=56, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_revnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create RevNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 38: layers = [3, 3, 3] channels_per_layers = [32, 64, 112] bottleneck = False elif blocks == 110: layers = [9, 9, 9] channels_per_layers = [32, 64, 128] bottleneck = False elif blocks == 164: layers = [9, 9, 9] channels_per_layers = [128, 256, 512] bottleneck = True else: raise ValueError("Unsupported RevNet with number of blocks: {}".format(blocks)) init_block_channels = 32 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = RevNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def revnet38(**kwargs): """ RevNet-38 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,' https://arxiv.org/abs/1707.04585. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_revnet(blocks=38, model_name="revnet38", **kwargs) def revnet110(**kwargs): """ RevNet-110 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,' https://arxiv.org/abs/1707.04585. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_revnet(blocks=110, model_name="revnet110", **kwargs) def revnet164(**kwargs): """ RevNet-164 model from 'The Reversible Residual Network: Backpropagation Without Storing Activations,' https://arxiv.org/abs/1707.04585. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_revnet(blocks=164, model_name="revnet164", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ revnet38, revnet110, revnet164, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != revnet38 or weight_count == 685864) assert (model != revnet110 or weight_count == 1982600) assert (model != revnet164 or weight_count == 2491656) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
15,590
28.142056
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ntsnet_cub.py
""" NTS-Net for CUB-200-2011, implemented in PyTorch. Original paper: 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. """ __all__ = ['NTSNet', 'ntsnet_cub'] import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import conv1x1, conv3x3, Flatten from .resnet import resnet50b def hard_nms(cdds, top_n=10, iou_thresh=0.25): """ Hard Non-Maximum Suppression. Parameters: ---------- cdds : np.array Borders. top_n : int, default 10 Number of top-K informative regions. iou_thresh : float, default 0.25 IoU threshold. Returns: ------- np.array Filtered borders. """ assert (type(cdds) == np.ndarray) assert (len(cdds.shape) == 2) assert (cdds.shape[1] >= 5) cdds = cdds.copy() indices = np.argsort(cdds[:, 0]) cdds = cdds[indices] cdd_results = [] res = cdds while res.any(): cdd = res[-1] cdd_results.append(cdd) if len(cdd_results) == top_n: return np.array(cdd_results) res = res[:-1] start_max = np.maximum(res[:, 1:3], cdd[1:3]) end_min = np.minimum(res[:, 3:5], cdd[3:5]) lengths = end_min - start_max intersec_map = lengths[:, 0] * lengths[:, 1] intersec_map[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0 iou_map_cur = intersec_map / ((res[:, 3] - res[:, 1]) * (res[:, 4] - res[:, 2]) + (cdd[3] - cdd[1]) * ( cdd[4] - cdd[2]) - intersec_map) res = res[iou_map_cur < iou_thresh] return np.array(cdd_results) class NavigatorBranch(nn.Module): """ Navigator branch block for Navigator unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(NavigatorBranch, self).__init__() mid_channels = 128 self.down_conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=stride, bias=True) self.activ = nn.ReLU(inplace=False) self.tidy_conv = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) self.flatten = Flatten() def forward(self, x): y = self.down_conv(x) y = self.activ(y) z = self.tidy_conv(y) z = self.flatten(z) return z, y class NavigatorUnit(nn.Module): """ Navigator init. """ def __init__(self): super(NavigatorUnit, self).__init__() self.branch1 = NavigatorBranch( in_channels=2048, out_channels=6, stride=1) self.branch2 = NavigatorBranch( in_channels=128, out_channels=6, stride=2) self.branch3 = NavigatorBranch( in_channels=128, out_channels=9, stride=2) def forward(self, x): t1, x = self.branch1(x) t2, x = self.branch2(x) t3, _ = self.branch3(x) return torch.cat((t1, t2, t3), dim=1) class NTSNet(nn.Module): """ NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. Parameters: ---------- backbone : nn.Sequential Feature extractor. aux : bool, default False Whether to output auxiliary results. top_n : int, default 4 Number of extra top-K informative regions. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, backbone, aux=False, top_n=4, in_channels=3, in_size=(448, 448), num_classes=200): super(NTSNet, self).__init__() assert (in_channels > 0) self.in_size = in_size self.num_classes = num_classes pad_side = 224 pad_width = (pad_side, pad_side, pad_side, pad_side) self.top_n = top_n self.aux = aux self.num_cat = 4 _, edge_anchors, _ = self._generate_default_anchor_maps() self.edge_anchors = (edge_anchors + 224).astype(np.int) self.edge_anchors = np.concatenate( (self.edge_anchors.copy(), np.arange(0, len(self.edge_anchors)).reshape(-1, 1)), axis=1) self.backbone = backbone self.backbone_tail = nn.Sequential() self.backbone_tail.add_module("final_pool", nn.AdaptiveAvgPool2d(1)) self.backbone_tail.add_module("flatten", Flatten()) self.backbone_tail.add_module("dropout", nn.Dropout(p=0.5)) self.backbone_classifier = nn.Linear( in_features=(512 * 4), out_features=num_classes) self.pad = nn.ZeroPad2d(padding=pad_width) self.navigator_unit = NavigatorUnit() self.concat_net = nn.Linear( in_features=(2048 * (self.num_cat + 1)), out_features=num_classes) if self.aux: self.partcls_net = nn.Linear( in_features=(512 * 4), out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): raw_pre_features = self.backbone(x) rpn_score = self.navigator_unit(raw_pre_features) all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1) for y in rpn_score.detach().cpu().numpy()] top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds] top_n_cdds = np.array(top_n_cdds) top_n_index = top_n_cdds[:, :, -1].astype(np.int64) top_n_index = torch.from_numpy(top_n_index).long().to(x.device) top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index) batch = x.size(0) part_imgs = torch.zeros(batch, self.top_n, 3, 224, 224, dtype=x.dtype, device=x.device) x_pad = self.pad(x) for i in range(batch): for j in range(self.top_n): y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64)) part_imgs[i:i + 1, j] = F.interpolate( input=x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode="bilinear", align_corners=True) part_imgs = part_imgs.view(batch * self.top_n, 3, 224, 224) part_features = self.backbone_tail(self.backbone(part_imgs.detach())) part_feature = part_features.view(batch, self.top_n, -1) part_feature = part_feature[:, :self.num_cat, :].contiguous() part_feature = part_feature.view(batch, -1) raw_features = self.backbone_tail(raw_pre_features.detach()) concat_out = torch.cat((part_feature, raw_features), dim=1) concat_logits = self.concat_net(concat_out) if self.aux: raw_logits = self.backbone_classifier(raw_features) part_logits = self.partcls_net(part_features).view(batch, self.top_n, -1) return concat_logits, raw_logits, part_logits, top_n_prob else: return concat_logits @staticmethod def _generate_default_anchor_maps(input_shape=(448, 448)): """ Generate default anchor maps. Parameters: ---------- input_shape : tuple of 2 int Input image size. Returns: ------- center_anchors : np.array anchors * 4 (oy, ox, h, w). edge_anchors : np.array anchors * 4 (y0, x0, y1, x1). anchor_area : np.array anchors * 1 (area). 
""" anchor_scale = [2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)] anchor_aspect_ratio = [0.667, 1, 1.5] anchors_setting = ( dict(layer="p3", stride=32, size=48, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio), dict(layer="p4", stride=64, size=96, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio), dict(layer="p5", stride=128, size=192, scale=[1, anchor_scale[0], anchor_scale[1]], aspect_ratio=anchor_aspect_ratio), ) center_anchors = np.zeros((0, 4), dtype=np.float32) edge_anchors = np.zeros((0, 4), dtype=np.float32) anchor_areas = np.zeros((0,), dtype=np.float32) input_shape = np.array(input_shape, dtype=int) for anchor_info in anchors_setting: stride = anchor_info["stride"] size = anchor_info["size"] scales = anchor_info["scale"] aspect_ratios = anchor_info["aspect_ratio"] output_map_shape = np.ceil(input_shape.astype(np.float32) / stride) output_map_shape = output_map_shape.astype(np.int) output_shape = tuple(output_map_shape) + (4, ) ostart = stride / 2.0 oy = np.arange(ostart, ostart + stride * output_shape[0], stride) oy = oy.reshape(output_shape[0], 1) ox = np.arange(ostart, ostart + stride * output_shape[1], stride) ox = ox.reshape(1, output_shape[1]) center_anchor_map_template = np.zeros(output_shape, dtype=np.float32) center_anchor_map_template[:, :, 0] = oy center_anchor_map_template[:, :, 1] = ox for anchor_scale in scales: for anchor_aspect_ratio in aspect_ratios: center_anchor_map = center_anchor_map_template.copy() center_anchor_map[:, :, 2] = size * anchor_scale / float(anchor_aspect_ratio) ** 0.5 center_anchor_map[:, :, 3] = size * anchor_scale * float(anchor_aspect_ratio) ** 0.5 edge_anchor_map = np.concatenate( (center_anchor_map[:, :, :2] - center_anchor_map[:, :, 2:4] / 2.0, center_anchor_map[:, :, :2] + center_anchor_map[:, :, 2:4] / 2.0), axis=-1) anchor_area_map = center_anchor_map[:, :, 2] * center_anchor_map[:, :, 3] center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4))) edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4))) anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1))) return center_anchors, edge_anchors, anchor_areas def get_ntsnet(backbone, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create NTS-Net model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. aux : bool, default False Whether to output auxiliary results. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = NTSNet( backbone=backbone, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ntsnet_cub(pretrained_backbone=False, aux=True, **kwargs): """ NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone[-1] return get_ntsnet(backbone=backbone, aux=aux, model_name="ntsnet_cub", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False aux = True models = [ ntsnet_cub, ] for model in models: net = model(pretrained=pretrained, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != ntsnet_cub or weight_count == 29033133) else: assert (model != ntsnet_cub or weight_count == 28623333) x = torch.randn(5, 3, 448, 448) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert (tuple(y.size()) == (5, 200)) if __name__ == "__main__": _test()
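# --- Editor's illustrative sketch (not from the original file) ---
# hard_nms() consumes rows of (score, y0, x0, y1, x1, ...extra columns...) and
# greedily keeps the highest-scoring boxes, dropping any later box whose IoU
# with an already-kept box reaches iou_thresh.
def _hard_nms_sketch():
    cdds = np.array([
        [0.9, 0.0, 0.0, 10.0, 10.0, 0.0],
        [0.8, 1.0, 1.0, 11.0, 11.0, 1.0],    # IoU with the first box ~0.68 -> dropped
        [0.7, 50.0, 50.0, 60.0, 60.0, 2.0],  # disjoint -> kept
    ])
    kept = hard_nms(cdds, top_n=10, iou_thresh=0.25)
    assert [row[-1] for row in kept] == [0.0, 2.0]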
14,019
32.54067
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/proxylessnas_cub.py
""" ProxylessNAS for CUB-200-2011, implemented in Gluon. Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. """ __all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub'] from .proxylessnas import get_proxylessnas def proxylessnas_cpu_cub(num_classes=200, **kwargs): """ ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(num_classes=num_classes, version="cpu", model_name="proxylessnas_cpu_cub", **kwargs) def proxylessnas_gpu_cub(num_classes=200, **kwargs): """ ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(num_classes=num_classes, version="gpu", model_name="proxylessnas_gpu_cub", **kwargs) def proxylessnas_mobile_cub(num_classes=200, **kwargs): """ ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(num_classes=num_classes, version="mobile", model_name="proxylessnas_mobile_cub", **kwargs) def proxylessnas_mobile14_cub(num_classes=200, **kwargs): """ ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_proxylessnas(num_classes=num_classes, version="mobile14", model_name="proxylessnas_mobile14_cub", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ proxylessnas_cpu_cub, proxylessnas_gpu_cub, proxylessnas_mobile_cub, proxylessnas_mobile14_cub, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != proxylessnas_cpu_cub or weight_count == 3215248) assert (model != proxylessnas_gpu_cub or weight_count == 5736648) assert (model != proxylessnas_mobile_cub or weight_count == 3055712) assert (model != proxylessnas_mobile14_cub or weight_count == 5423168) x = torch.randn(14, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, 200)) if __name__ == "__main__": _test()
4,155
32.788618
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ibnresnet.py
""" IBN-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. """ __all__ = ['IBNResNet', 'ibn_resnet50', 'ibn_resnet101', 'ibn_resnet152'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, IBN from .resnet import ResInitBlock class IBNConvBlock(nn.Module): """ IBN-Net specific convolution block with BN/IBN normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_ibn : bool, default False Whether use Instance-Batch Normalization. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_ibn=False, activate=True): super(IBNConvBlock, self).__init__() self.activate = activate self.use_ibn = use_ibn self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_ibn: self.ibn = IBN(channels=out_channels) else: self.bn = nn.BatchNorm2d(num_features=out_channels) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) if self.use_ibn: x = self.ibn(x) else: x = self.bn(x) if self.activate: x = self.activ(x) return x def ibn_conv1x1_block(in_channels, out_channels, stride=1, groups=1, bias=False, use_ibn=False, activate=True): """ 1x1 version of the IBN-Net specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_ibn : bool, default False Whether use Instance-Batch Normalization. activate : bool, default True Whether activate the convolution block. """ return IBNConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=groups, bias=bias, use_ibn=use_ibn, activate=activate) class IBNResBottleneck(nn.Module): """ IBN-ResNet bottleneck block for residual path in IBN-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, conv1_ibn): super(IBNResBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = ibn_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_ibn=conv1_ibn) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IBNResUnit(nn.Module): """ IBN-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, conv1_ibn): super(IBNResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = IBNResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_ibn=conv1_ibn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class IBNResNet(nn.Module): """ IBN-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(IBNResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 conv1_ibn = (out_channels < 2048) stage.add_module("unit{}".format(j + 1), IBNResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_ibn=conv1_ibn)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_ibnresnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IBN-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported IBN-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IBNResNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ibn_resnet50(**kwargs): """ IBN-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnet(blocks=50, model_name="ibn_resnet50", **kwargs) def ibn_resnet101(**kwargs): """ IBN-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnet(blocks=101, model_name="ibn_resnet101", **kwargs) def ibn_resnet152(**kwargs): """ IBN-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnet(blocks=152, model_name="ibn_resnet152", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ ibn_resnet50, ibn_resnet101, ibn_resnet152, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibn_resnet50 or weight_count == 25557032) assert (model != ibn_resnet101 or weight_count == 44549160) assert (model != ibn_resnet152 or weight_count == 60192808) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
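A minimal usage sketch (not part of the original file, added for illustration; the helper name `example_ibn_resnet50_inference` is hypothetical): it builds the IBN-ResNet-50 defined above with random weights and checks the logits shape against the model's default 224x224 `in_size`.

# Hypothetical usage sketch for the IBN-ResNet models defined above.
import torch

def example_ibn_resnet50_inference():
    net = ibn_resnet50(pretrained=False)  # random weights, no download
    net.eval()
    x = torch.randn(1, 3, 224, 224)  # one 224x224 RGB image, the default in_size
    with torch.no_grad():
        y = net(x)
    assert tuple(y.size()) == (1, 1000)  # ImageNet-1K logits
    return y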
12,570
29.002387
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/common.py
""" Common routines for models in PyTorch. """ __all__ = ['round_channels', 'Identity', 'BreakBlock', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'SelectableDense', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block', 'AsymConvBlock', 'asym_conv3x3_block', 'DeconvBlock', 'deconv3x3_block', 'NormActivation', 'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock', 'saconv3x3_block', 'DucBlock', 'IBN', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'Flatten', 'HeatmapMaxDetBlock'] import math from inspect import isfunction import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter def round_channels(channels, divisor=8): """ Round weighted channel number (make divisible operation). Parameters: ---------- channels : int or float Original number of channels. divisor : int, default 8 Alignment value. Returns: ------- int Weighted number of channels. """ rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor) if float(rounded_channels) < 0.9 * channels: rounded_channels += divisor return rounded_channels class Identity(nn.Module): """ Identity block. """ def __init__(self): super(Identity, self).__init__() def forward(self, x): return x def __repr__(self): return '{name}()'.format(name=self.__class__.__name__) class BreakBlock(nn.Module): """ Break coonnection block for hourglass. """ def __init__(self): super(BreakBlock, self).__init__() def forward(self, x): return None def __repr__(self): return '{name}()'.format(name=self.__class__.__name__) class Swish(nn.Module): """ Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941. """ def forward(self, x): return x * torch.sigmoid(x) class HSigmoid(nn.Module): """ Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ def forward(self, x): return F.relu6(x + 3.0, inplace=True) / 6.0 class HSwish(nn.Module): """ H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- inplace : bool Whether to use inplace version of the module. """ def __init__(self, inplace=False): super(HSwish, self).__init__() self.inplace = inplace def forward(self, x): return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0 def get_activation_layer(activation): """ Create activation layer from string/function. Parameters: ---------- activation : function, or str, or nn.Module Activation function or name of activation function. Returns: ------- nn.Module Activation layer. 
""" assert (activation is not None) if isfunction(activation): return activation() elif isinstance(activation, str): if activation == "relu": return nn.ReLU(inplace=True) elif activation == "relu6": return nn.ReLU6(inplace=True) elif activation == "swish": return Swish() elif activation == "hswish": return HSwish(inplace=True) elif activation == "sigmoid": return nn.Sigmoid() elif activation == "hsigmoid": return HSigmoid() elif activation == "identity": return Identity() else: raise NotImplementedError() else: assert (isinstance(activation, nn.Module)) return activation class SelectableDense(nn.Module): """ Selectable dense layer. Parameters: ---------- in_features : int Number of input features. out_features : int Number of output features. bias : bool, default False Whether the layer uses a bias vector. num_options : int, default 1 Number of selectable options. """ def __init__(self, in_features, out_features, bias=False, num_options=1): super(SelectableDense, self).__init__() self.in_features = in_features self.out_features = out_features self.use_bias = bias self.num_options = num_options self.weight = Parameter(torch.Tensor(num_options, out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(num_options, out_features)) else: self.register_parameter("bias", None) def forward(self, x, indices): weight = torch.index_select(self.weight, dim=0, index=indices) x = x.unsqueeze(-1) x = weight.bmm(x) x = x.squeeze(dim=-1) if self.use_bias: bias = torch.index_select(self.bias, dim=0, index=indices) x += bias return x def extra_repr(self): return "in_features={}, out_features={}, bias={}, num_options={}".format( self.in_features, self.out_features, self.use_bias, self.num_options) class DenseBlock(nn.Module): """ Standard dense block with Batch normalization and activation. Parameters: ---------- in_features : int Number of input features. out_features : int Number of output features. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_features, out_features, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(DenseBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.fc = nn.Linear( in_features=in_features, out_features=out_features, bias=bias) if self.use_bn: self.bn = nn.BatchNorm1d( num_features=out_features, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def forward(self, x): x = self.fc(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x class ConvBlock1d(nn.Module): """ Standard 1D convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. stride : int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(ConvBlock1d, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_bn: self.bn = nn.BatchNorm1d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def forward(self, x): x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1(in_channels, out_channels, stride=1, groups=1, bias=False): """ Convolution 1x1 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, groups=groups, bias=bias) def conv3x3(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False): """ Convolution 3x3 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) def depthwise_conv3x3(channels, stride=1, padding=1, dilation=1, bias=False): """ Depthwise convolution 3x3 layer. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d( in_channels=channels, out_channels=channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=channels, bias=bias) class ConvBlock(nn.Module): """ Standard convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(ConvBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.use_pad = (isinstance(padding, (list, tuple)) and (len(padding) == 4)) if self.use_pad: self.pad = nn.ZeroPad2d(padding=padding) padding = 0 self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_bn: self.bn = nn.BatchNorm2d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def forward(self, x): if self.use_pad: x = self.pad(x) x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1_block(in_channels, out_channels, stride=1, padding=0, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 1x1 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0 Padding value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def conv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def conv5x5_block(in_channels, out_channels, stride=1, padding=2, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 5x5 version of the standard convolution block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=5, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def conv7x7_block(in_channels, out_channels, stride=1, padding=3, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 7x7 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 3 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def dwconv_block(in_channels, out_channels, kernel_size, stride=1, padding=1, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ Depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function.
""" return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=out_channels, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def dwconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, bn_eps=bn_eps, activation=activation) def dwconv5x5_block(in_channels, out_channels, stride=1, padding=2, dilation=1, bias=False, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 5x5 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=5, stride=stride, padding=padding, dilation=dilation, bias=bias, bn_eps=bn_eps, activation=activation) class DwsConvBlock(nn.Module): """ Depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: nn.ReLU(inplace=True)), pw_activation=(lambda: nn.ReLU(inplace=True))): super(DwsConvBlock, self).__init__() self.dw_conv = dwconv_block( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=dw_use_bn, bn_eps=bn_eps, activation=dw_activation) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=pw_use_bn, bn_eps=bn_eps, activation=pw_activation) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x def dwsconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, bn_eps=1e-5, dw_activation=(lambda: nn.ReLU(inplace=True)), pw_activation=(lambda: nn.ReLU(inplace=True)), **kwargs): """ 3x3 depthwise separable version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, bn_eps=bn_eps, dw_activation=dw_activation, pw_activation=pw_activation, **kwargs) class PreConvBlock(nn.Module): """ Convolution block with Batch normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, use_bn=True, return_preact=False, activate=True): super(PreConvBlock, self).__init__() self.return_preact = return_preact self.activate = activate self.use_bn = use_bn if self.use_bn: self.bn = nn.BatchNorm2d(num_features=in_channels) if self.activate: self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) def forward(self, x): if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_conv1x1_block(in_channels, out_channels, stride=1, bias=False, use_bn=True, return_preact=False, activate=True): """ 1x1 version of the pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. """ return PreConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, bias=bias, use_bn=use_bn, return_preact=return_preact, activate=activate) def pre_conv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, use_bn=True, return_preact=False, activate=True): """ 3x3 version of the pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. """ return PreConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=use_bn, return_preact=return_preact, activate=activate) class AsymConvBlock(nn.Module): """ Asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. lw_use_bn : bool, default True Whether to use BatchNorm layer (leftwise convolution block). rw_use_bn : bool, default True Whether to use BatchNorm layer (rightwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. lw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the leftwise convolution block. rw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the rightwise convolution block. 
""" def __init__(self, channels, kernel_size, padding, dilation=1, groups=1, bias=False, lw_use_bn=True, rw_use_bn=True, bn_eps=1e-5, lw_activation=(lambda: nn.ReLU(inplace=True)), rw_activation=(lambda: nn.ReLU(inplace=True))): super(AsymConvBlock, self).__init__() self.lw_conv = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(kernel_size, 1), stride=1, padding=(padding, 0), dilation=(dilation, 1), groups=groups, bias=bias, use_bn=lw_use_bn, bn_eps=bn_eps, activation=lw_activation) self.rw_conv = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(1, kernel_size), stride=1, padding=(0, padding), dilation=(1, dilation), groups=groups, bias=bias, use_bn=rw_use_bn, bn_eps=bn_eps, activation=rw_activation) def forward(self, x): x = self.lw_conv(x) x = self.rw_conv(x) return x def asym_conv3x3_block(padding=1, **kwargs): """ 3x3 asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. padding : int, default 1 Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. lw_use_bn : bool, default True Whether to use BatchNorm layer (leftwise convolution block). rw_use_bn : bool, default True Whether to use BatchNorm layer (rightwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. lw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the leftwise convolution block. rw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the rightwise convolution block. """ return AsymConvBlock( kernel_size=3, padding=padding, **kwargs) class DeconvBlock(nn.Module): """ Deconvolution block with batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the deconvolution. padding : int or tuple/list of 2 int Padding value for deconvolution layer. ext_padding : tuple/list of 4 int, default None Extra padding value for deconvolution layer. out_padding : int or tuple/list of 2 int Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for deconvolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, ext_padding=None, out_padding=0, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(DeconvBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.use_pad = (ext_padding is not None) if self.use_pad: self.pad = nn.ZeroPad2d(padding=ext_padding) self.conv = nn.ConvTranspose2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=out_padding, dilation=dilation, groups=groups, bias=bias) if self.use_bn: self.bn = nn.BatchNorm2d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def forward(self, x): if self.use_pad: x = self.pad(x) x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def deconv3x3_block(padding=1, out_padding=1, **kwargs): """ 3x3 version of the deconvolution block with batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the deconvolution. padding : int or tuple/list of 2 int, default 1 Padding value for deconvolution layer. ext_padding : tuple/list of 4 int, default None Extra padding value for deconvolution layer. out_padding : int or tuple/list of 2 int, default 1 Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for deconvolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return DeconvBlock( kernel_size=3, padding=padding, out_padding=out_padding, **kwargs) class NormActivation(nn.Module): """ Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(NormActivation, self).__init__() self.bn = nn.BatchNorm2d( num_features=in_channels, eps=bn_eps) self.activ = get_activation_layer(activation) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class InterpolationBlock(nn.Module): """ Interpolation upsampling block. Parameters: ---------- scale_factor : int Multiplier for spatial size. out_size : tuple of 2 int, default None Spatial size of the output tensor for the bilinear interpolation operation. mode : str, default 'bilinear' Algorithm used for upsampling. align_corners : bool, default True Whether to align the corner pixels of the input and output tensors. up : bool, default True Whether to upsample or downsample. 
""" def __init__(self, scale_factor, out_size=None, mode="bilinear", align_corners=True, up=True): super(InterpolationBlock, self).__init__() self.scale_factor = scale_factor self.out_size = out_size self.mode = mode self.align_corners = align_corners self.up = up def forward(self, x, size=None): if (self.mode == "bilinear") or (size is not None): out_size = self.calc_out_size(x) if size is None else size return F.interpolate( input=x, size=out_size, mode=self.mode, align_corners=self.align_corners) else: return F.interpolate( input=x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) def calc_out_size(self, x): if self.out_size is not None: return self.out_size if self.up: return tuple(s * self.scale_factor for s in x.shape[2:]) else: return tuple(s // self.scale_factor for s in x.shape[2:]) def __repr__(self): s = '{name}(scale_factor={scale_factor}, out_size={out_size}, mode={mode}, align_corners={align_corners}, up={up})' # noqa return s.format( name=self.__class__.__name__, scale_factor=self.scale_factor, out_size=self.out_size, mode=self.mode, align_corners=self.align_corners, up=self.up) def calc_flops(self, x): assert (x.shape[0] == 1) if self.mode == "bilinear": num_flops = 9 * x.numel() else: num_flops = 4 * x.numel() num_macs = 0 return num_flops, num_macs def channel_shuffle(x, groups): """ Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- x : Tensor Input tensor. groups : int Number of groups. Returns: ------- Tensor Resulted tensor. """ batch, channels, height, width = x.size() # assert (channels % groups == 0) channels_per_group = channels // groups x = x.view(batch, groups, channels_per_group, height, width) x = torch.transpose(x, 1, 2).contiguous() x = x.view(batch, channels, height, width) return x class ChannelShuffle(nn.Module): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. """ def __init__(self, channels, groups): super(ChannelShuffle, self).__init__() # assert (channels % groups == 0) if channels % groups != 0: raise ValueError("channels must be divisible by groups") self.groups = groups def forward(self, x): return channel_shuffle(x, self.groups) def __repr__(self): s = "{name}(groups={groups})" return s.format( name=self.__class__.__name__, groups=self.groups) def channel_shuffle2(x, groups): """ Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. The alternative version. Parameters: ---------- x : Tensor Input tensor. groups : int Number of groups. Returns: ------- Tensor Resulted tensor. """ batch, channels, height, width = x.size() # assert (channels % groups == 0) channels_per_group = channels // groups x = x.view(batch, channels_per_group, groups, height, width) x = torch.transpose(x, 1, 2).contiguous() x = x.view(batch, channels, height, width) return x class ChannelShuffle2(nn.Module): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. The alternative version. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. 
""" def __init__(self, channels, groups): super(ChannelShuffle2, self).__init__() # assert (channels % groups == 0) if channels % groups != 0: raise ValueError("channels must be divisible by groups") self.groups = groups def forward(self, x): return channel_shuffle2(x, self.groups) class SEBlock(nn.Module): """ Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : int Number of channels. reduction : int, default 16 Squeeze reduction value. mid_channels : int or None, default None Number of middle channels. round_mid : bool, default False Whether to round middle channel number (make divisible by 8). use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. activation : function, or str, or nn.Module, default 'relu' Activation function after the first convolution. out_activation : function, or str, or nn.Module, default 'sigmoid' Activation function after the last convolution. """ def __init__(self, channels, reduction=16, mid_channels=None, round_mid=False, use_conv=True, mid_activation=(lambda: nn.ReLU(inplace=True)), out_activation=(lambda: nn.Sigmoid())): super(SEBlock, self).__init__() self.use_conv = use_conv if mid_channels is None: mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction) self.pool = nn.AdaptiveAvgPool2d(output_size=1) if use_conv: self.conv1 = conv1x1( in_channels=channels, out_channels=mid_channels, bias=True) else: self.fc1 = nn.Linear( in_features=channels, out_features=mid_channels) self.activ = get_activation_layer(mid_activation) if use_conv: self.conv2 = conv1x1( in_channels=mid_channels, out_channels=channels, bias=True) else: self.fc2 = nn.Linear( in_features=mid_channels, out_features=channels) self.sigmoid = get_activation_layer(out_activation) def forward(self, x): w = self.pool(x) if not self.use_conv: w = w.view(x.size(0), -1) w = self.conv1(w) if self.use_conv else self.fc1(w) w = self.activ(w) w = self.conv2(w) if self.use_conv else self.fc2(w) w = self.sigmoid(w) if not self.use_conv: w = w.unsqueeze(2).unsqueeze(3) x = x * w return x class SABlock(nn.Module): """ Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- out_channels : int Number of output channels. groups : int Number of channel groups (cardinality, without radix). radix : int Number of splits within a cardinal group. reduction : int, default 4 Squeeze reduction value. min_channels : int, default 32 Minimal number of squeezed channels. use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
""" def __init__(self, out_channels, groups, radix, reduction=4, min_channels=32, use_conv=True, bn_eps=1e-5): super(SABlock, self).__init__() self.groups = groups self.radix = radix self.use_conv = use_conv in_channels = out_channels * radix mid_channels = max(in_channels // reduction, min_channels) self.pool = nn.AdaptiveAvgPool2d(output_size=1) if use_conv: self.conv1 = conv1x1( in_channels=out_channels, out_channels=mid_channels, bias=True) else: self.fc1 = nn.Linear( in_features=out_channels, out_features=mid_channels) self.bn = nn.BatchNorm2d( num_features=mid_channels, eps=bn_eps) self.activ = nn.ReLU(inplace=True) if use_conv: self.conv2 = conv1x1( in_channels=mid_channels, out_channels=in_channels, bias=True) else: self.fc2 = nn.Linear( in_features=mid_channels, out_features=in_channels) self.softmax = nn.Softmax(dim=1) def forward(self, x): batch, channels, height, width = x.size() x = x.view(batch, self.radix, channels // self.radix, height, width) w = x.sum(dim=1) w = self.pool(w) if not self.use_conv: w = w.view(x.size(0), -1) w = self.conv1(w) if self.use_conv else self.fc1(w) w = self.bn(w) w = self.activ(w) w = self.conv2(w) if self.use_conv else self.fc2(w) w = w.view(batch, self.groups, self.radix, -1) w = torch.transpose(w, 1, 2).contiguous() w = self.softmax(w) w = w.view(batch, self.radix, -1, 1, 1) x = x * w x = x.sum(dim=1) return x class SAConvBlock(nn.Module): """ Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. radix : int, default 2 Number of splits within a cardinal group. reduction : int, default 4 Squeeze reduction value. min_channels : int, default 32 Minimal number of squeezed channels. use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), radix=2, reduction=4, min_channels=32, use_conv=True): super(SAConvBlock, self).__init__() self.conv = ConvBlock( in_channels=in_channels, out_channels=(out_channels * radix), kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=(groups * radix), bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) self.att = SABlock( out_channels=out_channels, groups=groups, radix=radix, reduction=reduction, min_channels=min_channels, use_conv=use_conv, bn_eps=bn_eps) def forward(self, x): x = self.conv(x) x = self.att(x) return x def saconv3x3_block(in_channels, out_channels, stride=1, padding=1, **kwargs): """ 3x3 version of the Split-Attention convolution block. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. """ return SAConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, **kwargs) class DucBlock(nn.Module): """ Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,' https://arxiv.org/abs/1702.08502. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : int Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, scale_factor): super(DucBlock, self).__init__() mid_channels = (scale_factor * scale_factor) * out_channels self.conv = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.pix_shuffle = nn.PixelShuffle(upscale_factor=scale_factor) def forward(self, x): x = self.conv(x) x = self.pix_shuffle(x) return x class IBN(nn.Module): """ Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : int Number of channels. first_fraction : float, default 0.5 The fraction of channels for the first normalization. inst_first : bool, default True Whether the instance normalization is applied to the first part of channels. """ def __init__(self, channels, first_fraction=0.5, inst_first=True): super(IBN, self).__init__() self.inst_first = inst_first h1_channels = int(math.floor(channels * first_fraction)) h2_channels = channels - h1_channels self.split_sections = [h1_channels, h2_channels] if self.inst_first: self.inst_norm = nn.InstanceNorm2d( num_features=h1_channels, affine=True) self.batch_norm = nn.BatchNorm2d(num_features=h2_channels) else: self.batch_norm = nn.BatchNorm2d(num_features=h1_channels) self.inst_norm = nn.InstanceNorm2d( num_features=h2_channels, affine=True) def forward(self, x): x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1) if self.inst_first: x1 = self.inst_norm(x1.contiguous()) x2 = self.batch_norm(x2.contiguous()) else: x1 = self.batch_norm(x1.contiguous()) x2 = self.inst_norm(x2.contiguous()) x = torch.cat((x1, x2), dim=1) return x class DualPathSequential(nn.Sequential): """ A sequential container for modules with dual inputs/outputs. Modules will be executed in the order they are added. Parameters: ---------- return_two : bool, default True Whether to return two outputs after execution. first_ordinals : int, default 0 Number of the first modules with single input/output. last_ordinals : int, default 0 Number of the final modules with single input/output. dual_path_scheme : function Scheme of dual path response for a module. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal module.
""" def __init__(self, return_two=True, first_ordinals=0, last_ordinals=0, dual_path_scheme=(lambda module, x1, x2: module(x1, x2)), dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))): super(DualPathSequential, self).__init__() self.return_two = return_two self.first_ordinals = first_ordinals self.last_ordinals = last_ordinals self.dual_path_scheme = dual_path_scheme self.dual_path_scheme_ordinal = dual_path_scheme_ordinal def forward(self, x1, x2=None): length = len(self._modules.values()) for i, module in enumerate(self._modules.values()): if (i < self.first_ordinals) or (i >= length - self.last_ordinals): x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2) else: x1, x2 = self.dual_path_scheme(module, x1, x2) if self.return_two: return x1, x2 else: return x1 class Concurrent(nn.Sequential): """ A container for concatenation of modules on the base of the sequential container. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. stack : bool, default False Whether to concatenate tensors along a new dimension. merge_type : str, default None Type of branch merging. """ def __init__(self, axis=1, stack=False, merge_type=None): super(Concurrent, self).__init__() assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"]) self.axis = axis if merge_type is not None: self.merge_type = merge_type else: self.merge_type = "stack" if stack else "cat" def forward(self, x): out = [] for module in self._modules.values(): out.append(module(x)) if self.merge_type == "stack": out = torch.stack(tuple(out), dim=self.axis) elif self.merge_type == "cat": out = torch.cat(tuple(out), dim=self.axis) elif self.merge_type == "sum": out = torch.stack(tuple(out), dim=self.axis).sum(self.axis) else: raise NotImplementedError() return out class SequentialConcurrent(nn.Sequential): """ A sequential container with concatenated outputs. Modules will be executed in the order they are added. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. stack : bool, default False Whether to concatenate tensors along a new dimension. cat_input : bool, default True Whether to concatenate input tensor. """ def __init__(self, axis=1, stack=False, cat_input=True): super(SequentialConcurrent, self).__init__() self.axis = axis self.stack = stack self.cat_input = cat_input def forward(self, x): out = [x] if self.cat_input else [] for module in self._modules.values(): x = module(x) out.append(x) if self.stack: out = torch.stack(tuple(out), dim=self.axis) else: out = torch.cat(tuple(out), dim=self.axis) return out class ParametricSequential(nn.Sequential): """ A sequential container for modules with parameters. Modules will be executed in the order they are added. """ def __init__(self, *args): super(ParametricSequential, self).__init__(*args) def forward(self, x, **kwargs): for module in self._modules.values(): x = module(x, **kwargs) return x class ParametricConcurrent(nn.Sequential): """ A container for concatenation of modules with parameters. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. """ def __init__(self, axis=1): super(ParametricConcurrent, self).__init__() self.axis = axis def forward(self, x, **kwargs): out = [] for module in self._modules.values(): out.append(module(x, **kwargs)) out = torch.cat(tuple(out), dim=self.axis) return out class Hourglass(nn.Module): """ A hourglass module. Parameters: ---------- down_seq : nn.Sequential Down modules as sequential. 
up_seq : nn.Sequential Up modules as sequential. skip_seq : nn.Sequential Skip connection modules as sequential. merge_type : str, default 'add' Type of concatenation of up and skip outputs. return_first_skip : bool, default False Whether return the first skip connection output. Used in ResAttNet. """ def __init__(self, down_seq, up_seq, skip_seq, merge_type="add", return_first_skip=False): super(Hourglass, self).__init__() self.depth = len(down_seq) assert (merge_type in ["cat", "add"]) assert (len(up_seq) == self.depth) assert (len(skip_seq) in (self.depth, self.depth + 1)) self.merge_type = merge_type self.return_first_skip = return_first_skip self.extra_skip = (len(skip_seq) == self.depth + 1) self.down_seq = down_seq self.up_seq = up_seq self.skip_seq = skip_seq def _merge(self, x, y): if y is not None: if self.merge_type == "cat": x = torch.cat((x, y), dim=1) elif self.merge_type == "add": x = x + y return x def forward(self, x, **kwargs): y = None down_outs = [x] for down_module in self.down_seq._modules.values(): x = down_module(x) down_outs.append(x) for i in range(len(down_outs)): if i != 0: y = down_outs[self.depth - i] skip_module = self.skip_seq[self.depth - i] y = skip_module(y) x = self._merge(x, y) if i != len(down_outs) - 1: if (i == 0) and self.extra_skip: skip_module = self.skip_seq[self.depth] x = skip_module(x) up_module = self.up_seq[self.depth - 1 - i] x = up_module(x) if self.return_first_skip: return x, y else: return x class SesquialteralHourglass(nn.Module): """ A sesquialteral hourglass block. Parameters: ---------- down1_seq : nn.Sequential The first down modules as sequential. skip1_seq : nn.Sequential The first skip connection modules as sequential. up_seq : nn.Sequential Up modules as sequential. skip2_seq : nn.Sequential The second skip connection modules as sequential. down2_seq : nn.Sequential The second down modules as sequential. merge_type : str, default 'cat' Type of concatenation of up and skip outputs. """ def __init__(self, down1_seq, skip1_seq, up_seq, skip2_seq, down2_seq, merge_type="cat"): super(SesquialteralHourglass, self).__init__() assert (len(down1_seq) == len(up_seq)) assert (len(down1_seq) == len(down2_seq)) assert (len(skip1_seq) == len(skip2_seq)) assert (len(down1_seq) == len(skip1_seq) - 1) assert (merge_type in ["cat", "add"]) self.merge_type = merge_type self.depth = len(down1_seq) self.down1_seq = down1_seq self.skip1_seq = skip1_seq self.up_seq = up_seq self.skip2_seq = skip2_seq self.down2_seq = down2_seq def _merge(self, x, y): if y is not None: if self.merge_type == "cat": x = torch.cat((x, y), dim=1) elif self.merge_type == "add": x = x + y return x def forward(self, x, **kwargs): y = self.skip1_seq[0](x) skip1_outs = [y] for i in range(self.depth): x = self.down1_seq[i](x) y = self.skip1_seq[i + 1](x) skip1_outs.append(y) x = skip1_outs[self.depth] y = self.skip2_seq[0](x) skip2_outs = [y] for i in range(self.depth): x = self.up_seq[i](x) y = skip1_outs[self.depth - 1 - i] x = self._merge(x, y) y = self.skip2_seq[i + 1](x) skip2_outs.append(y) x = self.skip2_seq[self.depth](x) for i in range(self.depth): x = self.down2_seq[i](x) y = skip2_outs[self.depth - 1 - i] x = self._merge(x, y) return x class MultiOutputSequential(nn.Sequential): """ A sequential container with multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- multi_output : bool, default True Whether to return multiple output. dual_output : bool, default False Whether to return dual output. 
return_last : bool, default True Whether to forcibly return last value. """ def __init__(self, multi_output=True, dual_output=False, return_last=True): super(MultiOutputSequential, self).__init__() self.multi_output = multi_output self.dual_output = dual_output self.return_last = return_last def forward(self, x): outs = [] for module in self._modules.values(): x = module(x) if hasattr(module, "do_output") and module.do_output: outs.append(x) elif hasattr(module, "do_output2") and module.do_output2: assert (type(x) == tuple) outs.extend(x[1]) x = x[0] if self.multi_output: return [x] + outs if self.return_last else outs elif self.dual_output: return x, outs else: return x class ParallelConcurent(nn.Sequential): """ A sequential container with multiple inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. merge_type : str, default 'list' Type of branch merging. """ def __init__(self, axis=1, merge_type="list"): super(ParallelConcurent, self).__init__() assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = axis self.merge_type = merge_type def forward(self, x): out = [] for module, xi in zip(self._modules.values(), x): out.append(module(xi)) if self.merge_type == "list": pass elif self.merge_type == "stack": out = torch.stack(tuple(out), dim=self.axis) elif self.merge_type == "cat": out = torch.cat(tuple(out), dim=self.axis) elif self.merge_type == "sum": out = torch.stack(tuple(out), dim=self.axis).sum(self.axis) else: raise NotImplementedError() return out class DualPathParallelConcurent(nn.Sequential): """ A sequential container with multiple dual-path inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- axis : int, default 1 The axis on which to concatenate the outputs. merge_type : str, default 'list' Type of branch merging. """ def __init__(self, axis=1, merge_type="list"): super(DualPathParallelConcurent, self).__init__() assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = axis self.merge_type = merge_type def forward(self, x1, x2): x1_out = [] x2_out = [] for module, x1i, x2i in zip(self._modules.values(), x1, x2): y1i, y2i = module(x1i, x2i) x1_out.append(y1i) x2_out.append(y2i) if self.merge_type == "list": pass elif self.merge_type == "stack": x1_out = torch.stack(tuple(x1_out), dim=self.axis) x2_out = torch.stack(tuple(x2_out), dim=self.axis) elif self.merge_type == "cat": x1_out = torch.cat(tuple(x1_out), dim=self.axis) x2_out = torch.cat(tuple(x2_out), dim=self.axis) elif self.merge_type == "sum": x1_out = torch.stack(tuple(x1_out), dim=self.axis).sum(self.axis) x2_out = torch.stack(tuple(x2_out), dim=self.axis).sum(self.axis) else: raise NotImplementedError() return x1_out, x2_out class Flatten(nn.Module): """ Simple flatten module. """ def forward(self, x): return x.view(x.size(0), -1) class HeatmapMaxDetBlock(nn.Module): """ Heatmap maximum detector block (for human pose estimation task). 
""" def __init__(self): super(HeatmapMaxDetBlock, self).__init__() def forward(self, x): heatmap = x vector_dim = 2 batch = heatmap.shape[0] channels = heatmap.shape[1] in_size = x.shape[2:] heatmap_vector = heatmap.view(batch, channels, -1) scores, indices = heatmap_vector.max(dim=vector_dim, keepdims=True) scores_mask = (scores > 0.0).float() pts_x = (indices % in_size[1]) * scores_mask pts_y = (indices // in_size[1]) * scores_mask pts = torch.cat((pts_x, pts_y, scores), dim=vector_dim) for b in range(batch): for k in range(channels): hm = heatmap[b, k, :, :] px = int(pts[b, k, 0]) py = int(pts[b, k, 1]) if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1): pts[b, k, 0] += (hm[py, px + 1] - hm[py, px - 1]).sign() * 0.25 pts[b, k, 1] += (hm[py + 1, px] - hm[py - 1, px]).sign() * 0.25 return pts @staticmethod def calc_flops(x): assert (x.shape[0] == 1) num_flops = x.numel() + 26 * x.shape[1] num_macs = 0 return num_flops, num_macs
74,363
30.902188
130
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/lwopenpose_cmupan.py
""" Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in PyTorch. Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. """ __all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco', 'LwopDecoderFinalBlock'] import os import torch from torch import nn from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block class LwopResBottleneck(nn.Module): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. """ def __init__(self, in_channels, out_channels, stride, bias=True, bottleneck_factor=2, squeeze_out=False): super(LwopResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor if squeeze_out else in_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bias=bias) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bias=bias, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class LwopResUnit(nn.Module): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. activate : bool, default False Whether to activate the sum. """ def __init__(self, in_channels, out_channels, stride=1, bias=True, bottleneck_factor=2, squeeze_out=False, activate=False): super(LwopResUnit, self).__init__() self.activate = activate self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = LwopResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, bottleneck_factor=bottleneck_factor, squeeze_out=squeeze_out) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, activation=None) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity if self.activate: x = self.activ(x) return x class LwopEncoderFinalBlock(nn.Module): """ Lightweight OpenPose 2D/3D specific encoder final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(LwopEncoderFinalBlock, self).__init__() self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=True, use_bn=False) self.body = nn.Sequential() for i in range(3): self.body.add_module("block{}".format(i + 1), dwsconv3x3_block( in_channels=out_channels, out_channels=out_channels, dw_use_bn=False, pw_use_bn=False, dw_activation=(lambda: nn.ELU(inplace=True)), pw_activation=(lambda: nn.ELU(inplace=True)))) self.post_conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=True, use_bn=False) def forward(self, x): x = self.pre_conv(x) x = x + self.body(x) x = self.post_conv(x) return x class LwopRefinementBlock(nn.Module): """ Lightweight OpenPose 2D/3D specific refinement block for decoder units. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(LwopRefinementBlock, self).__init__() self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=True, use_bn=False) self.body = nn.Sequential() self.body.add_module("block1", conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=True)) self.body.add_module("block2", conv3x3_block( in_channels=out_channels, out_channels=out_channels, padding=2, dilation=2, bias=True)) def forward(self, x): x = self.pre_conv(x) x = x + self.body(x) return x class LwopDecoderBend(nn.Module): """ Lightweight OpenPose 2D/3D specific decoder bend block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid_channels, out_channels): super(LwopDecoderBend, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=True, use_bn=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class LwopDecoderInitBlock(nn.Module): """ Lightweight OpenPose 2D/3D specific decoder init block. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. """ def __init__(self, in_channels, keypoints): super(LwopDecoderInitBlock, self).__init__() num_heatmap = keypoints num_paf = 2 * keypoints bend_mid_channels = 512 self.body = nn.Sequential() for i in range(3): self.body.add_module("block{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=in_channels, bias=True, use_bn=False)) self.heatmap_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_heatmap) self.paf_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_paf) def forward(self, x): y = self.body(x) heatmap = self.heatmap_bend(y) paf = self.paf_bend(y) y = torch.cat((x, heatmap, paf), dim=1) return y class LwopDecoderUnit(nn.Module): """ Lightweight OpenPose 2D/3D specific decoder init. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. 
""" def __init__(self, in_channels, keypoints): super(LwopDecoderUnit, self).__init__() num_heatmap = keypoints num_paf = 2 * keypoints self.features_channels = in_channels - num_heatmap - num_paf self.body = nn.Sequential() for i in range(5): self.body.add_module("block{}".format(i + 1), LwopRefinementBlock( in_channels=in_channels, out_channels=self.features_channels)) in_channels = self.features_channels self.heatmap_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_heatmap) self.paf_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_paf) def forward(self, x): features = x[:, :self.features_channels] y = self.body(x) heatmap = self.heatmap_bend(y) paf = self.paf_bend(y) y = torch.cat((features, heatmap, paf), dim=1) return y class LwopDecoderFeaturesBend(nn.Module): """ Lightweight OpenPose 2D/3D specific decoder 3D features bend. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid_channels, out_channels): super(LwopDecoderFeaturesBend, self).__init__() self.body = nn.Sequential() for i in range(2): self.body.add_module("block{}".format(i + 1), LwopRefinementBlock( in_channels=in_channels, out_channels=mid_channels)) in_channels = mid_channels self.features_bend = LwopDecoderBend( in_channels=mid_channels, mid_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.body(x) x = self.features_bend(x) return x class LwopDecoderFinalBlock(nn.Module): """ Lightweight OpenPose 2D/3D specific decoder final block for calcualation 3D poses. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. bottleneck_factor : int Bottleneck factor. calc_3d_features : bool Whether to calculate 3D features. """ def __init__(self, in_channels, keypoints, bottleneck_factor, calc_3d_features): super(LwopDecoderFinalBlock, self).__init__() self.num_heatmap_paf = 3 * keypoints self.calc_3d_features = calc_3d_features features_out_channels = self.num_heatmap_paf features_in_channels = in_channels - features_out_channels if self.calc_3d_features: self.body = nn.Sequential() for i in range(5): self.body.add_module("block{}".format(i + 1), LwopResUnit( in_channels=in_channels, out_channels=features_in_channels, bottleneck_factor=bottleneck_factor)) in_channels = features_in_channels self.features_bend = LwopDecoderFeaturesBend( in_channels=features_in_channels, mid_channels=features_in_channels, out_channels=features_out_channels) def forward(self, x): heatmap_paf_2d = x[:, -self.num_heatmap_paf:] if not self.calc_3d_features: return heatmap_paf_2d x = self.body(x) x = self.features_bend(x) y = torch.cat((heatmap_paf_2d, x), dim=1) return y class LwOpenPose(nn.Module): """ Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- encoder_channels : list of list of int Number of output channels for each encoder unit. encoder_paddings : list of list of int Padding/dilation value for each encoder unit. encoder_init_block_channels : int Number of output channels for the encoder initial unit. encoder_final_block_channels : int Number of output channels for the encoder final unit. refinement_units : int Number of refinement blocks in the decoder. 
    calc_3d_features : bool
        Whether to calculate 3D features.
    return_heatmap : bool, default True
        Whether to return only the heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (368, 368)
        Spatial size of the expected input image.
    keypoints : int, default 19
        Number of keypoints.
    """
    def __init__(self,
                 encoder_channels,
                 encoder_paddings,
                 encoder_init_block_channels,
                 encoder_final_block_channels,
                 refinement_units,
                 calc_3d_features,
                 return_heatmap=True,
                 in_channels=3,
                 in_size=(368, 368),
                 keypoints=19):
        super(LwOpenPose, self).__init__()
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.calc_3d_features = calc_3d_features
        num_heatmap_paf = 3 * keypoints

        self.encoder = nn.Sequential()
        backbone = nn.Sequential()
        backbone.add_module("init_block", conv3x3_block(
            in_channels=in_channels,
            out_channels=encoder_init_block_channels,
            stride=2))
        in_channels = encoder_init_block_channels
        for i, channels_per_stage in enumerate(encoder_channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if (j == 0) and (i != 0) else 1
                padding = encoder_paddings[i][j]
                stage.add_module("unit{}".format(j + 1), dwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    padding=padding,
                    dilation=padding))
                in_channels = out_channels
            backbone.add_module("stage{}".format(i + 1), stage)
        self.encoder.add_module("backbone", backbone)
        self.encoder.add_module("final_block", LwopEncoderFinalBlock(
            in_channels=in_channels,
            out_channels=encoder_final_block_channels))
        in_channels = encoder_final_block_channels

        self.decoder = nn.Sequential()
        self.decoder.add_module("init_block", LwopDecoderInitBlock(
            in_channels=in_channels,
            keypoints=keypoints))
        in_channels = encoder_final_block_channels + num_heatmap_paf
        for i in range(refinement_units):
            self.decoder.add_module("unit{}".format(i + 1), LwopDecoderUnit(
                in_channels=in_channels,
                keypoints=keypoints))
        self.decoder.add_module("final_block", LwopDecoderFinalBlock(
            in_channels=in_channels,
            keypoints=keypoints,
            bottleneck_factor=2,
            calc_3d_features=calc_3d_features))

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        # `return_heatmap` currently has no effect: the full decoder output is returned
        # either way.
        return x


def get_lwopenpose(calc_3d_features,
                   keypoints,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".torch", "models"),
                   **kwargs):
    """
    Create Lightweight OpenPose 2D/3D model with specific parameters.

    Parameters:
    ----------
    calc_3d_features : bool
        Whether to calculate 3D features.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
""" encoder_channels = [[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]] encoder_paddings = [[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]] encoder_init_block_channels = 32 encoder_final_block_channels = 128 refinement_units = 1 net = LwOpenPose( encoder_channels=encoder_channels, encoder_paddings=encoder_paddings, encoder_init_block_channels=encoder_init_block_channels, encoder_final_block_channels=encoder_final_block_channels, refinement_units=refinement_units, calc_3d_features=calc_3d_features, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, **kwargs): """ Lightweight OpenPose 2D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_lwopenpose(calc_3d_features=False, keypoints=keypoints, model_name="lwopenpose2d_mobilenet_cmupan_coco", **kwargs) def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, **kwargs): """ Lightweight OpenPose 3D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_lwopenpose(calc_3d_features=True, keypoints=keypoints, model_name="lwopenpose3d_mobilenet_cmupan_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (368, 368) keypoints = 19 return_heatmap = True pretrained = False models = [ (lwopenpose2d_mobilenet_cmupan_coco, "2d"), (lwopenpose3d_mobilenet_cmupan_coco, "3d"), ] for model, model_dim in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != lwopenpose2d_mobilenet_cmupan_coco or weight_count == 4091698) assert (model != lwopenpose3d_mobilenet_cmupan_coco or weight_count == 5085983) batch = 1 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() if model_dim == "2d": assert (tuple(y.size()) == (batch, 3 * keypoints, in_size[0] // 8, in_size[0] // 8)) else: assert (tuple(y.size()) == (batch, 6 * keypoints, in_size[0] // 8, in_size[0] // 8)) if __name__ == "__main__": _test()
21,152
31.643519
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/rir_cifar.py
""" RiR for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. """ __all__ = ['CIFARRiR', 'rir_cifar10', 'rir_cifar100', 'rir_svhn', 'RiRFinalBlock'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, DualPathSequential class PostActivation(nn.Module): """ Pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PostActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class RiRUnit(nn.Module): """ RiR unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(RiRUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.res_pass_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.trans_pass_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.res_cross_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.trans_cross_conv = conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride) self.res_postactiv = PostActivation(in_channels=out_channels) self.trans_postactiv = PostActivation(in_channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) def forward(self, x_res, x_trans): if self.resize_identity: x_res_identity = self.identity_conv(x_res) else: x_res_identity = x_res y_res = self.res_cross_conv(x_res) y_trans = self.trans_cross_conv(x_trans) x_res = self.res_pass_conv(x_res) x_trans = self.trans_pass_conv(x_trans) x_res = x_res + x_res_identity + y_trans x_trans = x_trans + y_res x_res = self.res_postactiv(x_res) x_trans = self.trans_postactiv(x_trans) return x_res, x_trans class RiRInitBlock(nn.Module): """ RiR initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(RiRInitBlock, self).__init__() self.res_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels) self.trans_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels) def forward(self, x, _): x_res = self.res_conv(x) x_trans = self.trans_conv(x) return x_res, x_trans class RiRFinalBlock(nn.Module): """ RiR final block. """ def __init__(self): super(RiRFinalBlock, self).__init__() def forward(self, x_res, x_trans): x = torch.cat((x_res, x_trans), dim=1) return x, None class CIFARRiR(nn.Module): """ RiR model for CIFAR from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. 
num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARRiR, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = DualPathSequential( return_two=False, first_ordinals=0, last_ordinals=0) self.features.add_module("init_block", RiRInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), RiRUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", RiRFinalBlock()) in_channels = final_block_channels self.output = nn.Sequential() self.output.add_module("final_conv", conv1x1_block( in_channels=in_channels, out_channels=num_classes, activation=None)) self.output.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_rir_cifar(num_classes, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create RiR model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[48, 48, 48, 48], [96, 96, 96, 96, 96, 96], [192, 192, 192, 192, 192, 192]] init_block_channels = 48 final_block_channels = 384 net = CIFARRiR( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def rir_cifar10(num_classes=10, **kwargs): """ RiR model for CIFAR-10 from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_rir_cifar(num_classes=num_classes, model_name="rir_cifar10", **kwargs) def rir_cifar100(num_classes=100, **kwargs): """ RiR model for CIFAR-100 from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_rir_cifar(num_classes=num_classes, model_name="rir_cifar100", **kwargs) def rir_svhn(num_classes=10, **kwargs): """ RiR model for SVHN from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_rir_cifar(num_classes=num_classes, model_name="rir_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (rir_cifar10, 10), (rir_cifar100, 100), (rir_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != rir_cifar10 or weight_count == 9492980) assert (model != rir_cifar100 or weight_count == 9527720) assert (model != rir_svhn or weight_count == 9492980) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
10,658
29.454286
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/unet.py
""" U-Net for image segmentation, implemented in PyTorch. Original paper: 'U-Net: Convolutional Networks for Biomedical Image Segmentation,' https://arxiv.org/abs/1505.04597. """ __all__ = ['UNet', 'unet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, conv3x3_block, InterpolationBlock, Hourglass, Identity class UNetBlock(nn.Module): """ U-Net specific base block (double convolution). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, bias): super(UNetBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=bias) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class UNetDownStage(nn.Module): """ U-Net specific downscale (encoder) stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, bias): super(UNetDownStage, self).__init__() self.pool = nn.MaxPool2d(kernel_size=2) self.conv = UNetBlock( in_channels=in_channels, out_channels=out_channels, bias=bias) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class UNetUpStage(nn.Module): """ U-Net specific upscale (decoder) stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, bias): super(UNetUpStage, self).__init__() self.conv = UNetBlock( in_channels=in_channels, out_channels=out_channels, bias=bias) self.up = InterpolationBlock( scale_factor=2, align_corners=True) def forward(self, x): x = self.conv(x) x = self.up(x) return x class UNetHead(nn.Module): """ U-Net specific head. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, bias): super(UNetHead, self).__init__() mid_channels = in_channels // 2 self.conv1 = UNetBlock( in_channels=in_channels, out_channels=mid_channels, bias=bias) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class UNet(nn.Module): """ U-Net model from 'U-Net: Convolutional Networks for Biomedical Image Segmentation,' https://arxiv.org/abs/1505.04597. Parameters: ---------- channels : list of list of int Number of output channels for each stage in encoder and decoder. init_block_channels : int Number of output channels for the initial unit. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, init_block_channels, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(UNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = True self.stem = UNetBlock( in_channels=in_channels, out_channels=init_block_channels, bias=bias) in_channels = init_block_channels down_seq = nn.Sequential() skip_seq = nn.Sequential() for i, out_channels in enumerate(channels[0]): down_seq.add_module("down{}".format(i + 1), UNetDownStage( in_channels=in_channels, out_channels=out_channels, bias=bias)) in_channels = out_channels skip_seq.add_module("skip{}".format(i + 1), Identity()) up_seq = nn.Sequential() for i, out_channels in enumerate(channels[1]): if i == 0: up_seq.add_module("down{}".format(i + 1), InterpolationBlock( scale_factor=2, align_corners=True)) else: up_seq.add_module("down{}".format(i + 1), UNetUpStage( in_channels=(2 * in_channels), out_channels=out_channels, bias=bias)) in_channels = out_channels up_seq = up_seq[::-1] self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, merge_type="cat") self.head = UNetHead( in_channels=(2 * in_channels), out_channels=num_classes, bias=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.stem(x) x = self.hg(x) x = self.head(x) return x def get_unet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create U-Net model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[128, 256, 512, 512], [512, 256, 128, 64]] init_block_channels = 64 net = UNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def unet_cityscapes(num_classes=19, **kwargs): """ U-Net model for Cityscapes from 'U-Net: Convolutional Networks for Biomedical Image Segmentation,' https://arxiv.org/abs/1505.04597. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_unet(num_classes=num_classes, model_name="unet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ unet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != unet_cityscapes or weight_count == 13396499) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
9,378
27.335347
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/diapreresnet.py
""" DIA-PreResNet for ImageNet-1K, implemented in PyTorch. Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['DIAPreResNet', 'diapreresnet10', 'diapreresnet12', 'diapreresnet14', 'diapreresnetbc14b', 'diapreresnet16', 'diapreresnet18', 'diapreresnet26', 'diapreresnetbc26b', 'diapreresnet34', 'diapreresnetbc38b', 'diapreresnet50', 'diapreresnet50b', 'diapreresnet101', 'diapreresnet101b', 'diapreresnet152', 'diapreresnet152b', 'diapreresnet200', 'diapreresnet200b', 'diapreresnet269b', 'DIAPreResUnit'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, DualPathSequential from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation from .diaresnet import DIAAttention class DIAPreResUnit(nn.Module): """ DIA-PreResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. attention : nn.Module, default None Attention module. """ def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, attention=None): super(DIAPreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) self.attention = attention def forward(self, x, hc=None): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x, hc = self.attention(x, hc) x = x + identity return x, hc class DIAPreResNet(nn.Module): """ DIA-PreResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(DIAPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 stage.add_module("unit{}".format(j + 1), DIAPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, attention=attention)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_diapreresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DIA-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported DIA-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = DIAPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def diapreresnet10(**kwargs): """ DIA-PreResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=10, model_name="diapreresnet10", **kwargs) def diapreresnet12(**kwargs): """ DIA-PreResNet-12 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=12, model_name="diapreresnet12", **kwargs) def diapreresnet14(**kwargs): """ DIA-PreResNet-14 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=14, model_name="diapreresnet14", **kwargs) def diapreresnetbc14b(**kwargs): """ DIA-PreResNet-BC-14b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc14b", **kwargs) def diapreresnet16(**kwargs): """ DIA-PreResNet-16 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=16, model_name="diapreresnet16", **kwargs) def diapreresnet18(**kwargs): """ DIA-PreResNet-18 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=18, model_name="diapreresnet18", **kwargs) def diapreresnet26(**kwargs): """ DIA-PreResNet-26 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=26, bottleneck=False, model_name="diapreresnet26", **kwargs) def diapreresnetbc26b(**kwargs): """ DIA-PreResNet-BC-26b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc26b", **kwargs) def diapreresnet34(**kwargs): """ DIA-PreResNet-34 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=34, model_name="diapreresnet34", **kwargs) def diapreresnetbc38b(**kwargs): """ DIA-PreResNet-BC-38b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diapreresnetbc38b", **kwargs) def diapreresnet50(**kwargs): """ DIA-PreResNet-50 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=50, model_name="diapreresnet50", **kwargs) def diapreresnet50b(**kwargs): """ DIA-PreResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=50, conv1_stride=False, model_name="diapreresnet50b", **kwargs) def diapreresnet101(**kwargs): """ DIA-PreResNet-101 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=101, model_name="diapreresnet101", **kwargs) def diapreresnet101b(**kwargs): """ DIA-PreResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=101, conv1_stride=False, model_name="diapreresnet101b", **kwargs) def diapreresnet152(**kwargs): """ DIA-PreResNet-152 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=152, model_name="diapreresnet152", **kwargs) def diapreresnet152b(**kwargs): """ DIA-PreResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=152, conv1_stride=False, model_name="diapreresnet152b", **kwargs) def diapreresnet200(**kwargs): """ DIA-PreResNet-200 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=200, model_name="diapreresnet200", **kwargs) def diapreresnet200b(**kwargs): """ DIA-PreResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diapreresnet(blocks=200, conv1_stride=False, model_name="diapreresnet200b", **kwargs) def diapreresnet269b(**kwargs): """ DIA-PreResNet-269 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diapreresnet(blocks=269, conv1_stride=False, model_name="diapreresnet269b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ diapreresnet10, diapreresnet12, diapreresnet14, diapreresnetbc14b, diapreresnet16, diapreresnet18, diapreresnet26, diapreresnetbc26b, diapreresnet34, diapreresnetbc38b, diapreresnet50, diapreresnet50b, diapreresnet101, diapreresnet101b, diapreresnet152, diapreresnet152b, diapreresnet200, diapreresnet200b, diapreresnet269b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != diapreresnet10 or weight_count == 6295688) assert (model != diapreresnet12 or weight_count == 6369672) assert (model != diapreresnet14 or weight_count == 6665096) assert (model != diapreresnetbc14b or weight_count == 24016424) assert (model != diapreresnet16 or weight_count == 7845768) assert (model != diapreresnet18 or weight_count == 12566408) assert (model != diapreresnet26 or weight_count == 18837128) assert (model != diapreresnetbc26b or weight_count == 29946664) assert (model != diapreresnet34 or weight_count == 22674568) assert (model != diapreresnetbc38b or weight_count == 35876904) assert (model != diapreresnet50 or weight_count == 39508520) assert (model != diapreresnet50b or weight_count == 39508520) assert (model != diapreresnet101 or weight_count == 58500648) assert (model != diapreresnet101b or weight_count == 58500648) assert (model != diapreresnet152 or weight_count == 74144296) assert (model != diapreresnet152b or weight_count == 74144296) assert (model != diapreresnet200 or weight_count == 78625320) assert (model != diapreresnet200b or weight_count == 78625320) assert (model != diapreresnet269b or weight_count == 116024872) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
21,166
33.814145
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/jasperdr.py
""" Jasper DR (Dense Residual) for ASR, implemented in PyTorch. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['jasperdr10x5_en', 'jasperdr10x5_en_nr'] from .jasper import get_jasper def jasperdr10x5_en(num_classes=29, **kwargs): """ Jasper DR 10x5 model for English language from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- num_classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_jasper(num_classes=num_classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en", **kwargs) def jasperdr10x5_en_nr(num_classes=29, **kwargs): """ Jasper DR 10x5 model for English language (with presence of noise) from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- num_classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_jasper(num_classes=num_classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en_nr", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import numpy as np import torch pretrained = False audio_features = 64 models = [ jasperdr10x5_en, jasperdr10x5_en_nr, ] for model in models: net = model( in_channels=audio_features, pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != jasperdr10x5_en or weight_count == 332632349) assert (model != jasperdr10x5_en_nr or weight_count == 332632349) batch = 3 seq_len = np.random.randint(60, 150, batch) seq_len_max = seq_len.max() + 2 x = torch.randn(batch, audio_features, seq_len_max) x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device) y, y_len = net(x, x_len) # y.sum().backward() assert (tuple(y.size())[:2] == (batch, net.num_classes)) assert (y.size()[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
2,982
30.4
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/segnet.py
""" SegNet for image segmentation, implemented in PyTorch. Original paper: 'SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation,' https://arxiv.org/abs/1511.00561. """ __all__ = ['SegNet', 'segnet_cityscapes'] import os import torch import torch.nn as nn from .common import conv3x3, conv3x3_block, DualPathSequential class SegNet(nn.Module): """ SegNet model from 'SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation,' https://arxiv.org/abs/1511.00561. Parameters: ---------- channels : list of list of int Number of output channels for each stage in encoder and decoder. layers : list of list of int Number of layers for each stage in encoder and decoder. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, channels, layers, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(SegNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = True for i, out_channels in enumerate(channels[0]): stage = nn.Sequential() for j in range(layers[0][i]): if j == layers[0][i] - 1: unit = nn.MaxPool2d( kernel_size=2, stride=2, return_indices=True) else: unit = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=bias) stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels setattr(self, "down_stage{}".format(i + 1), stage) for i, channels_per_stage in enumerate(channels[1]): stage = DualPathSequential( return_two=False, last_ordinals=(layers[1][i] - 1), dual_path_scheme=(lambda module, x1, x2: (module(x1, x2), x2))) for j in range(layers[1][i]): if j == layers[1][i] - 1: out_channels = channels_per_stage else: out_channels = in_channels if j == 0: unit = nn.MaxUnpool2d( kernel_size=2, stride=2) else: unit = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=bias) stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels setattr(self, "up_stage{}".format(i + 1), stage) self.head = conv3x3( in_channels=in_channels, out_channels=num_classes, bias=bias) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x, max_indices1 = self.down_stage1(x) x, max_indices2 = self.down_stage2(x) x, max_indices3 = self.down_stage3(x) x, max_indices4 = self.down_stage4(x) x, max_indices5 = self.down_stage5(x) x = self.up_stage1(x, max_indices5) x = self.up_stage2(x, max_indices4) x = self.up_stage3(x, max_indices3) x = self.up_stage4(x, max_indices2) x = self.up_stage5(x, max_indices1) x = self.head(x) return x def get_segnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SegNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[64, 128, 256, 512, 512], [512, 256, 128, 64, 64]] layers = [[3, 3, 4, 4, 4], [4, 4, 4, 3, 2]] net = SegNet( channels=channels, layers=layers, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def segnet_cityscapes(num_classes=19, **kwargs): """ SegNet model for Cityscapes from 'SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation,' https://arxiv.org/abs/1511.00561. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_segnet(num_classes=num_classes, model_name="segnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ segnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != segnet_cityscapes or weight_count == 29453971) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
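# Editorial sketch appended to this file, not part of the original SegNet code. It
# illustrates the one mechanism the decoder above depends on: nn.MaxPool2d with
# return_indices=True emits the argmax locations that nn.MaxUnpool2d later consumes
# to put values back at their original positions (this is what DualPathSequential
# threads through the up stages). Tensor shapes are illustrative assumptions; call
# the function manually to run the check.
def _demo_pool_unpool():
    pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
    unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
    x = torch.randn(1, 3, 8, 8)
    y, indices = pool(x)
    # Pooling halves the spatial size; indices record where each maximum came from:
    assert tuple(y.size()) == (1, 3, 4, 4)
    x_rec = unpool(y, indices)
    # The unpooled map is sparse: maxima are restored in place, zeros elsewhere:
    assert tuple(x_rec.size()) == (1, 3, 8, 8)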
7,072
31.74537
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/deeplabv3.py
""" DeepLabv3 for image segmentation, implemented in PyTorch. Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. """ __all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc', 'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco', 'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes', 'deeplabv3_resnetd101b_cityscapes'] import os import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent from .resnetd import resnetd50b, resnetd101b, resnetd152b class DeepLabv3FinalBlock(nn.Module): """ DeepLabv3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, bottleneck_factor=4): super(DeepLabv3FinalBlock, self).__init__() assert (in_channels % bottleneck_factor == 0) mid_channels = in_channels // bottleneck_factor self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x, out_size): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) x = F.interpolate(x, size=out_size, mode="bilinear", align_corners=True) return x class ASPPAvgBranch(nn.Module): """ ASPP branch with average pooling. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. upscale_out_size : tuple of 2 int Spatial size of output image for the bilinear upsampling operation. """ def __init__(self, in_channels, out_channels, upscale_out_size): super(ASPPAvgBranch, self).__init__() self.upscale_out_size = upscale_out_size self.pool = nn.AdaptiveAvgPool2d(1) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) def forward(self, x): in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = F.interpolate(x, size=in_size, mode="bilinear", align_corners=True) return x class AtrousSpatialPyramidPooling(nn.Module): """ Atrous Spatial Pyramid Pooling (ASPP) module. Parameters: ---------- in_channels : int Number of input channels. upscale_out_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. 
""" def __init__(self, in_channels, upscale_out_size): super(AtrousSpatialPyramidPooling, self).__init__() atrous_rates = [12, 24, 36] assert (in_channels % 8 == 0) mid_channels = in_channels // 8 project_in_channels = 5 * mid_channels self.branches = Concurrent() self.branches.add_module("branch1", conv1x1_block( in_channels=in_channels, out_channels=mid_channels)) for i, atrous_rate in enumerate(atrous_rates): self.branches.add_module("branch{}".format(i + 2), conv3x3_block( in_channels=in_channels, out_channels=mid_channels, padding=atrous_rate, dilation=atrous_rate)) self.branches.add_module("branch5", ASPPAvgBranch( in_channels=in_channels, out_channels=mid_channels, upscale_out_size=upscale_out_size)) self.conv = conv1x1_block( in_channels=project_in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.5, inplace=False) def forward(self, x): x = self.branches(x) x = self.conv(x) x = self.dropout(x) return x class DeepLabv3(nn.Module): """ DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. num_classes : int, default 21 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21): super(DeepLabv3, self).__init__() assert (in_channels > 0) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.backbone = backbone pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None self.pool = AtrousSpatialPyramidPooling( in_channels=backbone_out_channels, upscale_out_size=pool_out_size) pool_out_channels = backbone_out_channels // 8 self.final_block = DeepLabv3FinalBlock( in_channels=pool_out_channels, out_channels=num_classes, bottleneck_factor=1) if self.aux: aux_out_channels = backbone_out_channels // 2 self.aux_block = DeepLabv3FinalBlock( in_channels=aux_out_channels, out_channels=num_classes, bottleneck_factor=4) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, y = self.backbone(x) x = self.pool(x) x = self.final_block(x, in_size) if self.aux: y = self.aux_block(y, in_size) return x, y else: return x def get_deeplabv3(backbone, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DeepLabv3 model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. num_classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" net = DeepLabv3( backbone=backbone, num_classes=num_classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def deeplabv3_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd50b_voc", **kwargs) def deeplabv3_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd101b_voc", **kwargs) def deeplabv3_resnetd152b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd152b_voc", **kwargs) def deeplabv3_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. 
num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd50b_coco", **kwargs) def deeplabv3_resnetd101b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd101b_coco", **kwargs) def deeplabv3_resnetd152b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd152b_coco", **kwargs) def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd50b_ade20k", **kwargs) def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k", **kwargs) def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd50b_cityscapes", **kwargs) def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_deeplabv3(backbone=backbone, num_classes=num_classes, aux=aux, model_name="deeplabv3_resnetd101b_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (480, 480) aux = True pretrained = False models = [ (deeplabv3_resnetd50b_voc, 21), (deeplabv3_resnetd101b_voc, 21), (deeplabv3_resnetd152b_voc, 21), (deeplabv3_resnetd50b_coco, 21), (deeplabv3_resnetd101b_coco, 21), (deeplabv3_resnetd152b_coco, 21), (deeplabv3_resnetd50b_ade20k, 150), (deeplabv3_resnetd101b_ade20k, 150), (deeplabv3_resnetd50b_cityscapes, 19), (deeplabv3_resnetd101b_cityscapes, 19), ] for model, num_classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != deeplabv3_resnetd50b_voc or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_voc or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_voc or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_coco or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_coco or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_coco or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 42194156) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 61186284) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 42126822) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 61118950) else: assert (model != deeplabv3_resnetd50b_voc or weight_count == 39762645) assert (model != deeplabv3_resnetd101b_voc or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_voc or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_coco or weight_count == 39762645) assert (model != deeplabv3_resnetd101b_coco or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_coco or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 39795798) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 58787926) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 39762131) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 58754259) x = torch.randn(1, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3))) if __name__ == "__main__": _test()
21,944
37.840708
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fpenet.py
""" FPENet for image segmentation, implemented in PyTorch. Original paper: 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. """ __all__ = ['FPENet', 'fpenet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, SEBlock, InterpolationBlock, MultiOutputSequential class FPEBlock(nn.Module): """ FPENet block. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels): super(FPEBlock, self).__init__() dilations = [1, 2, 4, 8] assert (channels % len(dilations) == 0) mid_channels = channels // len(dilations) self.blocks = nn.Sequential() for i, dilation in enumerate(dilations): self.blocks.add_module("block{}".format(i + 1), conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, groups=mid_channels, dilation=dilation, padding=dilation)) def forward(self, x): xs = torch.chunk(x, chunks=len(self.blocks._modules), dim=1) ys = [] for bi, xsi in zip(self.blocks._modules.values(), xs): if len(ys) == 0: ys.append(bi(xsi)) else: ys.append(bi(xsi + ys[-1])) x = torch.cat(ys, dim=1) return x class FPEUnit(nn.Module): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int Bottleneck factor. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, stride, bottleneck_factor, use_se): super(FPEUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.use_se = use_se mid1_channels = in_channels * bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, stride=stride) self.block = FPEBlock(channels=mid1_channels) self.conv2 = conv1x1_block( in_channels=mid1_channels, out_channels=out_channels, activation=None) if self.use_se: self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.conv1(x) x = self.block(x) x = self.conv2(x) if self.use_se: x = self.se(x) x = x + identity x = self.activ(x) return x class FPEStage(nn.Module): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, layers, use_se): super(FPEStage, self).__init__() self.use_block = (layers > 1) if self.use_block: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, stride=2, bottleneck_factor=4, use_se=use_se) self.blocks = nn.Sequential() for i in range(layers - 1): self.blocks.add_module("block{}".format(i + 1), FPEUnit( in_channels=out_channels, out_channels=out_channels, stride=1, bottleneck_factor=1, use_se=use_se)) else: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, stride=1, bottleneck_factor=1, use_se=use_se) def forward(self, x): x = self.down(x) if self.use_block: y = self.blocks(x) x = x + y return x class MEUBlock(nn.Module): """ FPENet specific mutual embedding upsample (MEU) block. Parameters: ---------- in_channels_high : int Number of input channels for x_high. 
in_channels_low : int Number of input channels for x_low. out_channels : int Number of output channels. """ def __init__(self, in_channels_high, in_channels_low, out_channels): super(MEUBlock, self).__init__() self.conv_high = conv1x1_block( in_channels=in_channels_high, out_channels=out_channels, activation=None) self.conv_low = conv1x1_block( in_channels=in_channels_low, out_channels=out_channels, activation=None) self.pool = nn.AdaptiveAvgPool2d(1) self.conv_w_high = conv1x1( in_channels=out_channels, out_channels=out_channels) self.conv_w_low = conv1x1( in_channels=1, out_channels=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU(inplace=True) self.up = InterpolationBlock( scale_factor=2, align_corners=True) def forward(self, x_high, x_low): x_high = self.conv_high(x_high) x_low = self.conv_low(x_low) w_high = self.pool(x_high) w_high = self.conv_w_high(w_high) w_high = self.relu(w_high) w_high = self.sigmoid(w_high) w_low = x_low.mean(dim=1, keepdim=True) w_low = self.conv_w_low(w_low) w_low = self.sigmoid(w_low) x_high = self.up(x_high) x_high = x_high * w_low x_low = x_low * w_high out = x_high + x_low return out class FPENet(nn.Module): """ FPENet model from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. meu_channels : list of int Number of output channels for MEU blocks. use_se : bool Whether to use SE-module. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, layers, channels, init_block_channels, meu_channels, use_se, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(FPENet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.stem = conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2) in_channels = init_block_channels self.encoder = MultiOutputSequential(return_last=False) for i, (layers_i, out_channels) in enumerate(zip(layers, channels)): stage = FPEStage( in_channels=in_channels, out_channels=out_channels, layers=layers_i, use_se=use_se) stage.do_output = True self.encoder.add_module("stage{}".format(i + 1), stage) in_channels = out_channels self.meu1 = MEUBlock( in_channels_high=channels[-1], in_channels_low=channels[-2], out_channels=meu_channels[0]) self.meu2 = MEUBlock( in_channels_high=meu_channels[0], in_channels_low=channels[-3], out_channels=meu_channels[1]) in_channels = meu_channels[1] self.classifier = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True) self.up = InterpolationBlock( scale_factor=2, align_corners=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.stem(x) y = self.encoder(x) x = self.meu1(y[2], y[1]) x = self.meu2(x, y[0]) x = self.classifier(x) x = self.up(x) return x def get_fpenet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create FPENet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ width = 16 channels = [int(width * (2 ** i)) for i in range(3)] init_block_channels = width layers = [1, 3, 9] meu_channels = [64, 32] use_se = False net = FPENet( layers=layers, channels=channels, init_block_channels=init_block_channels, meu_channels=meu_channels, use_se=use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fpenet_cityscapes(num_classes=19, **kwargs): """ FPENet model for Cityscapes from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_fpenet(num_classes=num_classes, model_name="fpenet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False in_size = (1024, 2048) models = [ fpenet_cityscapes, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fpenet_cityscapes or weight_count == 115125) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, 19, in_size[0], in_size[1])) if __name__ == "__main__": _test()
12,630
28.511682
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/irevnet.py
""" i-RevNet for ImageNet-1K, implemented in PyTorch. Original paper: 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. """ __all__ = ['IRevNet', 'irevnet301', 'IRevDownscale', 'IRevSplitBlock', 'IRevMergeBlock'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv3x3, pre_conv3x3_block, DualPathSequential class IRevDualPathSequential(DualPathSequential): """ An invertible sequential container for modules with dual inputs/outputs. Modules will be executed in the order they are added. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first modules with single input/output. last_ordinals : int, default 0 Number of the final modules with single input/output. dual_path_scheme : function Scheme of dual path response for a module. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal module. last_noninvertible : int, default 0 Number of the final modules skipped during inverse. """ def __init__(self, return_two=True, first_ordinals=0, last_ordinals=0, dual_path_scheme=(lambda module, x1, x2: module(x1, x2)), dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2)), last_noninvertible=0): super(IRevDualPathSequential, self).__init__( return_two=return_two, first_ordinals=first_ordinals, last_ordinals=last_ordinals, dual_path_scheme=dual_path_scheme, dual_path_scheme_ordinal=dual_path_scheme_ordinal) self.last_noninvertible = last_noninvertible def inverse(self, x1, x2=None): length = len(self._modules.values()) for i, module in enumerate(reversed(self._modules.values())): if i < self.last_noninvertible: pass elif (i < self.last_ordinals) or (i >= length - self.first_ordinals): x1, x2 = self.dual_path_scheme_ordinal(module.inverse, x1, x2) else: x1, x2 = self.dual_path_scheme(module.inverse, x1, x2) if self.return_two: return x1, x2 else: return x1 class IRevDownscale(nn.Module): """ i-RevNet specific downscale (so-called psi-block). Parameters: ---------- scale : int Scale (downscale) value. """ def __init__(self, scale): super(IRevDownscale, self).__init__() self.scale = scale def forward(self, x): batch, x_channels, x_height, x_width = x.size() y_channels = x_channels * self.scale * self.scale assert (x_height % self.scale == 0) y_height = x_height // self.scale y = x.permute(0, 2, 3, 1) d2_split_seq = y.split(split_size=self.scale, dim=2) d2_split_seq = [t.contiguous().view(batch, y_height, y_channels) for t in d2_split_seq] y = torch.stack(d2_split_seq, dim=1) y = y.permute(0, 3, 2, 1) return y.contiguous() def inverse(self, y): scale_sqr = self.scale * self.scale batch, y_channels, y_height, y_width = y.size() assert (y_channels % scale_sqr == 0) x_channels = y_channels // scale_sqr x_height = y_height * self.scale x_width = y_width * self.scale x = y.permute(0, 2, 3, 1) x = x.contiguous().view(batch, y_height, y_width, scale_sqr, x_channels) d3_split_seq = x.split(split_size=self.scale, dim=3) d3_split_seq = [t.contiguous().view(batch, y_height, x_width, x_channels) for t in d3_split_seq] x = torch.stack(d3_split_seq, dim=0) x = x.transpose(0, 1).permute(0, 2, 1, 3, 4).contiguous().view(batch, x_height, x_width, x_channels) x = x.permute(0, 3, 1, 2) return x.contiguous() class IRevInjectivePad(nn.Module): """ i-RevNet channel zero padding block. Parameters: ---------- padding : int Size of the padding. 
""" def __init__(self, padding): super(IRevInjectivePad, self).__init__() self.padding = padding self.pad = nn.ZeroPad2d(padding=(0, 0, 0, padding)) def forward(self, x): x = x.permute(0, 2, 1, 3) x = self.pad(x) return x.permute(0, 2, 1, 3) def inverse(self, x): return x[:, :x.size(1) - self.padding, :, :] class IRevSplitBlock(nn.Module): """ iRevNet split block. """ def __init__(self): super(IRevSplitBlock, self).__init__() def forward(self, x, _): x1, x2 = torch.chunk(x, chunks=2, dim=1) return x1, x2 def inverse(self, x1, x2): x = torch.cat((x1, x2), dim=1) return x, None class IRevMergeBlock(nn.Module): """ iRevNet merge block. """ def __init__(self): super(IRevMergeBlock, self).__init__() def forward(self, x1, x2): x = torch.cat((x1, x2), dim=1) return x, x def inverse(self, x, _): x1, x2 = torch.chunk(x, chunks=2, dim=1) return x1, x2 class IRevBottleneck(nn.Module): """ iRevNet bottleneck block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the branch convolution layers. preactivate : bool Whether use pre-activation for the first convolution block. """ def __init__(self, in_channels, out_channels, stride, preactivate): super(IRevBottleneck, self).__init__() mid_channels = out_channels // 4 if preactivate: self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=stride) else: self.conv1 = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = pre_conv3x3_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IRevUnit(nn.Module): """ iRevNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the branch convolution layers. preactivate : bool Whether use pre-activation for the first convolution block. """ def __init__(self, in_channels, out_channels, stride, preactivate): super(IRevUnit, self).__init__() if not preactivate: in_channels = in_channels // 2 padding = 2 * (out_channels - in_channels) self.do_padding = (padding != 0) and (stride == 1) self.do_downscale = (stride != 1) if self.do_padding: self.pad = IRevInjectivePad(padding) self.bottleneck = IRevBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate) if self.do_downscale: self.psi = IRevDownscale(stride) def forward(self, x1, x2): if self.do_padding: x = torch.cat((x1, x2), dim=1) x = self.pad(x) x1, x2 = torch.chunk(x, chunks=2, dim=1) fx2 = self.bottleneck(x2) if self.do_downscale: x1 = self.psi(x1) x2 = self.psi(x2) y1 = fx2 + x1 return x2, y1 def inverse(self, x2, y1): if self.do_downscale: x2 = self.psi.inverse(x2) fx2 = - self.bottleneck(x2) x1 = fx2 + y1 if self.do_downscale: x1 = self.psi.inverse(x1) if self.do_padding: x = torch.cat((x1, x2), dim=1) x = self.pad.inverse(x) x1, x2 = torch.chunk(x, chunks=2, dim=1) return x1, x2 class IRevPostActivation(nn.Module): """ iRevNet specific post-activation block. Parameters: ---------- in_channels : int Number of input channels. 
""" def __init__(self, in_channels): super(IRevPostActivation, self).__init__() self.bn = nn.BatchNorm2d( num_features=in_channels, momentum=0.9) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class IRevNet(nn.Module): """ i-RevNet model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(IRevNet, self).__init__() assert (in_channels > 0) self.in_size = in_size self.num_classes = num_classes self.features = IRevDualPathSequential( first_ordinals=1, last_ordinals=2, last_noninvertible=2) self.features.add_module("init_block", IRevDownscale(scale=2)) in_channels = init_block_channels self.features.add_module("init_split", IRevSplitBlock()) for i, channels_per_stage in enumerate(channels): stage = IRevDualPathSequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 preactivate = not ((i == 0) and (j == 0)) stage.add_module("unit{}".format(j + 1), IRevUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) in_channels = final_block_channels self.features.add_module("final_merge", IRevMergeBlock()) self.features.add_module("final_postactiv", IRevPostActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x, return_out_bij=False): x, out_bij = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) if return_out_bij: return x, out_bij else: return x def inverse(self, out_bij): x, _ = self.features.inverse(out_bij) return x def get_irevnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create i-RevNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 301: layers = [6, 16, 72, 6] else: raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 1 == blocks) channels_per_layers = [24, 96, 384, 1536] init_block_channels = 12 final_block_channels = 3072 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IRevNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def irevnet301(**kwargs): """ i-RevNet-301 model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_irevnet(blocks=301, model_name="irevnet301", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ irevnet301, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != irevnet301 or weight_count == 125120356) x = torch.randn(2, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (2, 1000)) y, out_bij = net(x, return_out_bij=True) x_ = net.inverse(out_bij) assert (tuple(x_.size()) == (2, 3, 224, 224)) import numpy as np assert (np.max(np.abs(x.detach().numpy() - x_.detach().numpy())) < 1e-4) if __name__ == "__main__": _test()
15,151
29.796748
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/model_store.py
""" Model store which provides pretrained models. """ __all__ = ['get_model_file', 'load_model', 'download_model', 'calc_num_params'] import os import zipfile import logging import hashlib _model_sha1 = {name: (error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem) for name, error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem in [ ('alexnet', '1664', '2768cdb312d584e33e93f31b0c569589bb289749', 'v0.0.481', 'AlexNet', '1404.5997', 'in1k', 224, 0.875, 200, ''), # noqa ('alexnetb', '1747', 'ac887bf7eada4179857d243584ac30b4d74a6493', 'v0.0.485', 'AlexNet-b', '1404.5997', 'in1k', 224, 0.875, 200, ''), # noqa ('zfnet', '1727', 'd010ddca1eb32a50a8cceb475c792f53e769b631', 'v0.0.395', 'ZFNet', '1311.2901', 'in1k', 224, 0.875, 200, ''), # noqa ('zfnetb', '1490', 'f6bec24eba037c8e4956704ed5bafaed29966601', 'v0.0.400', 'ZFNet-b', '1311.2901', 'in1k', 224, 0.875, 200, ''), # noqa ('vgg11', '1036', '71e85f6ef76f56e3e89d597d2fc461496ed281e9', 'v0.0.381', 'VGG-11', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('vgg13', '0975', '2b2c8770a7610d9dcd444ec8ae992681e270eb42', 'v0.0.388', 'VGG-13', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('vgg16', '0865', '5ca155da3dc6687e070ff34815cb5aabd0bed4b9', 'v0.0.401', 'VGG-16', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('vgg19', '0790', '9bd923a82ece9f038e944d7666f1c11b478dc7e6', 'v0.0.420', 'VGG-19', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg11', '0961', '10f01fba064ec168df074b98d59ae7b82b1207d4', 'v0.0.339', 'BN-VGG-11', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg13', '0913', 'b1acd7158e6e9973ce9e274c65ceb64a244f9967', 'v0.0.353', 'BN-VGG-13', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg16', '0779', '0f570b928b180f909fa39df3924f89c746816722', 'v0.0.359', 'BN-VGG-16', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg19', '0712', '3f286cbd2a57abb4c516425c5e095c2cfc8d54e3', 'v0.0.360', 'BN-VGG-19', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg11b', '0996', 'ef747edc87705e1ed500a31c80199273b2fbd5fa', 'v0.0.407', 'BN-VGG-11b', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg13b', '0924', '5f313c535fc47c3ad6bd2f741f453dbcf8191be6', 'v0.0.488', 'BN-VGG-13b', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg16b', '0795', 'bfff365ac38a763aaed4b4d9bdc7b2cdbe6c8e9f', 'v0.0.489', 'BN-VGG-16b', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bn_vgg19b', '0746', 'f523b4e4b070a170f63e9bb6965fca3764751aa9', 'v0.0.490', 'BN-VGG-19b', '1409.1556', 'in1k', 224, 0.875, 200, ''), # noqa ('bninception', '0774', 'd79ba5f573ba2da5fea5e4c9a7f67ddd526e234b', 'v0.0.405', 'BN-Inception', '1502.03167', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet10', '1253', '88a5961b62448ef51d57e749675cdb097695a634', 'v0.0.569', 'ResNet-10', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet12', '1223', '84a43cf672c708a016dd1142ca1a23c278931532', 'v0.0.485', 'ResNet-12', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet14', '1109', 'b3132cbfb7d64ae83b1cd2e3954f4c5b1180fd7b', 'v0.0.491', 'ResNet-14', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnetbc14b', '1074', '14b1fd95d8b7964c0e7c6eba22f6f58db03d3df0', 'v0.0.481', 'ResNet-BC-14b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet16', '1009', '4352d6a91d6e28aa839f741006a5a41cfa62bfd6', 'v0.0.493', 'ResNet-16', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet18_wd4', '1785', 'fe79b31f56e7becab9c014dbc14ccdb564b5148f', 'v0.0.262', 'ResNet-18 
x0.25', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet18_wd2', '1327', '6654f50ad357f4596502b92b3dca2147776089ac', 'v0.0.263', 'ResNet-18 x0.5', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet18_w3d4', '1106', '3636648b504e1ba134947743eb34dd0e78feda02', 'v0.0.266', 'ResNet-18 x0.75', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet18', '0896', '77a56f155214819bfc79ff09795370f955b20e6d', 'v0.0.478', 'ResNet-18', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet26', '0849', '4bfbc640f218e0eaf4c380cfdb98d55f259862d6', 'v0.0.489', 'ResNet-26', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnetbc26b', '0797', '7af52a73b234dc56ab4b0757cf3ea772d0699622', 'v0.0.313', 'ResNet-BC-26b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet34', '0780', '3f775482a327e5fc4850fbb77785bfc55e171e5f', 'v0.0.291', 'ResNet-34', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnetbc38b', '0700', '3fbac61d86810d489988a92f425f1a6bfe46f155', 'v0.0.328', 'ResNet-BC-38b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet50', '0633', 'b00d1c8e52aa7a2badc705b1545aaf6ccece6ce9', 'v0.0.329', 'ResNet-50', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet50b', '0638', '8a5473ef985d65076a3758117ad5700d726bd952', 'v0.0.308', 'ResNet-50b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet101', '0540', '65faf44721096a75fa72b875efb416513f864078', 'v0.0.499', 'ResNet-101', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet101b', '0530', 'f059ba3c7fa4a65f2da6e17f3718662d59836637', 'v0.0.357', 'ResNet-101b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet152', '0468', 'd46977ddb5660bb523e9f2de50e5d16cef8e3027', 'v0.0.518', 'ResNet-152', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('resnet152b', '0445', '2f420307673264444e8457e2050b5d6b131002d7', 'v0.0.517', 'ResNet-152b', '1512.03385', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet10', '1421', 'b3973cd4461287d61df081d6f689d293eacf2248', 'v0.0.249', 'PreResNet-10', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet12', '1348', '563066fa8fcf8b5f19906b933fea784965d68192', 'v0.0.257', 'PreResNet-12', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet14', '1239', '4be725fd3f06c99c46817fce3b69caf2ebc62414', 'v0.0.260', 'PreResNet-14', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnetbc14b', '1181', 'a68d31c372e647474ae954e51e5bc2ba9fb3f166', 'v0.0.315', 'PreResNet-BC-14b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet16', '1108', '06d8c87e29284dac19a9019485e210541532411a', 'v0.0.261', 'PreResNet-16', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet18_wd4', '1811', '41135c15210390e9a564b14e8ae2ebda1a662ec1', 'v0.0.272', 'PreResNet-18 x0.25', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet18_wd2', '1340', 'c1fe4e314188eeb93302432d03731a91ce8bc9f2', 'v0.0.273', 'PreResNet-18 x0.5', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet18_w3d4', '1105', 'ed2f9ca434b6910b92657eefc73ad186396578d5', 'v0.0.274', 'PreResNet-18 x0.75', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet18', '0972', '5651bc2dbb200382822a6b64375d240f747cc726', 'v0.0.140', 'PreResNet-18', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet26', '0851', '99e7d6cc5944cd7cf6d4746e6fdf18b477d3d9a0', 'v0.0.316', 'PreResNet-26', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnetbc26b', '0803', 'd7283bdd70e1b75520fe2cdcc273d51715e077b4', 'v0.0.325', 'PreResNet-BC-26b', 
'1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet34', '0774', 'fd5bd1e883048e29099768465df2dd9e891803f4', 'v0.0.300', 'PreResNet-34', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnetbc38b', '0657', '9e523bb92dc592ee576a6bb73a328dc024bdc967', 'v0.0.348', 'PreResNet-BC-38b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet50', '0647', '222ca73b021f893b925c15e24ea2a6bc0fdf2546', 'v0.0.330', 'PreResNet-50', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet50b', '0655', '8b60378ee3aed878d27a2b4a9ddc596a812c7649', 'v0.0.307', 'PreResNet-50b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet101', '0563', '8ec82f7d697b7329aea2c95b399093e9cb2b1114', 'v0.0.504', 'PreResNet-101', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet101b', '0556', '76bfe6d020b55f163e77de6b1c27be6b0bed8b7b', 'v0.0.351', 'PreResNet-101b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet152', '0464', 'baeb6c5208310ab7c919fc0da3c20267471a8fa1', 'v0.0.510', 'PreResNet-152', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet152b', '0459', '42c9fbcfe4e92463497fa4c2d0b007a191c6c043', 'v0.0.523', 'PreResNet-152b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet200b', '0468', 'f82215f3a5616098e8172a85bb42071f1823a27d', 'v0.0.529', 'PreResNet-200b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('preresnet269b', '0527', 'f38eca01ea8cf43d6e6fecf1fe6b1a6cd5725cb2', 'v0.0.545', 'PreResNet-269b', '1603.05027', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext14_16x4d', '1248', '35ffac2a26374e71b6bf4bc9f90b7a1a1dd47e7d', 'v0.0.370', 'ResNeXt-14 (16x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext14_32x2d', '1281', '14521186b8c78c7c07f3904360839f22c180f65e', 'v0.0.371', 'ResNeXt-14 (32x2d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext14_32x4d', '1146', '89aa679393d8356ce5589749b4371714bf4ceac0', 'v0.0.327', 'ResNeXt-14 (32x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext26_32x2d', '0887', 'c3bd130747909a8c89546f3b3f5ce08bb4f55731', 'v0.0.373', 'ResNeXt-26 (32x2d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext26_32x4d', '0746', '1011ac35e30d753b79f0600a5376c87a37b67a61', 'v0.0.332', 'ResNeXt-26 (32x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext50_32x4d', '0560', 'd7976503d13734114364e0dfef1d22f6d76546d9', 'v0.0.498', 'ResNeXt-50 (32x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext101_32x4d', '0434', '5ac165981bac62627719b3362b31b456cba05df4', 'v0.0.530', 'ResNeXt-101 (32x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('resnext101_64x4d', '0452', '60d1913ec591af7786056b1d87b3add07fdcf2e1', 'v0.0.544', 'ResNeXt-101 (64x4d)', '1611.05431', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet10', '1202', '8dace12e6aaac68d3c272f52b2513a5b40a4f959', 'v0.0.486', 'SE-ResNet-10', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet12', '1200', '81d5406e29f4c91cb85e079cf66c6e7348079e5b', 'v0.0.544', 'SE-ResNet-12', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet14', '1128', '2afa45c6a2a8cad376e994fc690b9f72cffdc875', 'v0.0.545', 'SE-ResNet-14', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet16', '0998', 'e2c666dd14dec8918854df7200706ed0c5ae8e74', 'v0.0.545', 'SE-ResNet-16', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet18', '0961', '022123a5e88c9917e63165f5b5a7808a606d452a', 'v0.0.355', 'SE-ResNet-18', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa ('seresnet26', 
'0824', '64fc8759c5bb9b9b40b2e33a46420ee22ae268c9', 'v0.0.363', 'SE-ResNet-26', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnetbc26b', '0703', 'b98d9d6afca4d79d0347001542162b9fe4071d39', 'v0.0.366', 'SE-ResNet-BC-26b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnetbc38b', '0595', '03671c05f5f684b44085383b7b89a8b44a7524fe', 'v0.0.374', 'SE-ResNet-BC-38b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnet50', '0575', '004bfde422c860c4f11b1e1190bb5a8db477d939', 'v0.0.441', 'SE-ResNet-50', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnet50b', '0539', '459e6871e944d1c7102ee9c055ea428b8d9a168c', 'v0.0.387', 'SE-ResNet-50b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnet101', '0460', '37851448605dae67bbf83ff8e7f7e7cc367e1746', 'v0.0.533', 'SE-ResNet-101', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnet101b', '0487', 'b83a20fd2ad9a32e0fe5cb3daef45aac03ea3194', 'v0.0.460', 'SE-ResNet-101b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnet152', '0439', '7a6b02ac25caccb0420eea542c625f9b0bfb3e03', 'v0.0.538', 'SE-ResNet-152', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet10', '1257', 'a08d5c618ebf6bca046f826366e7cd6fbe40851b', 'v0.0.544', 'SE-PreResNet-10', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet12', '1203', '4f8d63e2a1841b0a1b5bae5caa46770c3f183055', 'v0.0.543', 'SE-PreResNet-12', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet16', '0975', '251c11a4886ba81d7ac377ace5ab0172101f1b53', 'v0.0.543', 'SE-PreResNet-16', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet18', '0909', 'cd3cc116f96254d5d664f1c322bbc684287aa82d', 'v0.0.543', 'SE-PreResNet-18', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet26', '0822', '2c73c690d9822ac7cfe22471da78816b4ac729f9', 'v0.0.543', 'SE-PreResNet-26', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnetbc26b', '0660', 'f750b2f588a27620b30c86f0060a41422d4a0f75', 'v0.0.399', 'SE-PreResNet-BC-26b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnetbc38b', '0578', '12827fcd3c8c1a8c8ba1d109e85ffa67e7ab306a', 'v0.0.409', 'SE-PreResNet-BC-38b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sepreresnet50b', '0549', '4628a07d7dd92c775868dffd33fd6e3e7522c261', 'v0.0.461', 'SE-PreResNet-50b', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnext50_32x4d', '0451', '52029a7f6170873b2d50a7016fba053e98183f7b', 'v0.0.505', 'SE-ResNeXt-50 (32x4d)', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnext101_32x4d', '0467', 'c738e758c535fac87027fc4b9271a7cb95442505', 'v0.0.529', 'SE-ResNeXt-101 (32x4d)', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('seresnext101_64x4d', '0428', 'ea9d98df431d53251011099f317cd20fa2307d1b', 'v0.0.561', 'SE-ResNeXt-101 (64x4d)', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('senet16', '0820', '373aeafdc994c3e03bf483a9fa3ecb152353722a', 'v0.0.341', 'SENet-16', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('senet28', '0598', '27165b63696061e57c141314d44732aa65f807a8', 'v0.0.356', 'SENet-28', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('senet154', '0455', '95dbccbe56dc93c4544e6d1c6673f09425a4cee2', 'v0.0.522', 'SENet-154', '1709.01507', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnestabc14', '0647', '0c3d9e34aebf0dee0dbcbb937eb54f2a7fc8f64a', 'v0.0.493', 'ResNeSt(A)-BC-14', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnesta18', '0707', 'efca5a69587dcdff3aa5d3d7cbd621d082299e27', 'v0.0.489', 'ResNeSt(A)-18', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnestabc26', '0476', '7d97b20648e1f38454e6f5b1fe796c8eaf6e7e74', 'v0.0.495', 'ResNeSt(A)-BC-26', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnesta50', '0449', '8ebaf2c7ee098e60bf9426c21d49a21bc00fa8d0', 'v0.0.531', 'ResNeSt(A)-50', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnesta101', '0403', '61e147732069b54ed4da4b342b1b8526a0e9df54', 'v0.0.465', 'ResNeSt(A)-101', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnesta152', '0463', '42e22fedbf9e7a8b2286163e3380044189d524c0', 'v0.0.540', 'ResNeSt(A)-152', '2004.08955', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resnesta200', '0339', '6dc300871b186950ee64fd28bb168f7fb4a036e3', 'v0.0.465', 'ResNeSt(A)-200', '2004.08955', 'in1k', 256, 0.875, 150, ''), # noqa
    ('resnesta269', '0338', '6a555ce85eb177299eb43747cf019a50d3a143c1', 'v0.0.465', 'ResNeSt(A)-269', '2004.08955', 'in1k', 320, 0.875, 100, ''), # noqa
    ('ibn_resnet50', '0576', '40c420fcbbfd87bf634fc5b351746e124c32e401', 'v0.0.495', 'IBN-ResNet-50', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('ibn_resnet101', '0507', '6f488f243cb02e8f4e934a390f8037cef927dcf7', 'v0.0.552', 'IBN-ResNet-101', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('ibnb_resnet50', '0597', '383b44324af7bb3842b93df177bdd199864e0e8d', 'v0.0.552', 'IBN(b)-ResNet-50', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('ibn_resnext101_32x4d', '0512', '73534cc42c9f7b1aa859b32e012c31f9ea66fd60', 'v0.0.553', 'IBN-ResNeXt-101 (32x4d)', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('ibn_densenet121', '0673', '0ea2c535382c7a3d92e712617d8405ba631c071f', 'v0.0.493', 'IBN-DenseNet-121', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('ibn_densenet169', '0619', 'ec2c0556f4fb2e2e51d49460095bf28259cb5d19', 'v0.0.500', 'IBN-DenseNet-169', '1807.09441', 'in1k', 224, 0.875, 200, ''), # noqa
    ('airnet50_1x64d_r2', '0532', '398445f4059b5505e2fd5b7338fe174960f8571a', 'v0.0.522', 'AirNet50-1x64d (r=2)', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('airnet50_1x64d_r16', '0560', 'd46d344b7e4216d43dc83659afd265d11cf3e05e', 'v0.0.519', 'AirNet50-1x64d (r=16)', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('airnext50_32x4d_r2', '0515', '85f13273529e6c4192a790fc55dafa7f022376f4', 'v0.0.521', 'AirNeXt50-32x4d (r=2)', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('bam_resnet50', '0547', 'a04adf3c93f56836509f66668aa90360c9688eb8', 'v0.0.499', 'BAM-ResNet-50', '1807.06514', 'in1k', 224, 0.875, 200, ''), # noqa
    ('cbam_resnet50', '0505', 'd8cf8488efb97afecd6b3287a3ca9fa093fc3127', 'v0.0.537', 'CBAM-ResNet-50', '1807.06521', 'in1k', 224, 0.875, 200, ''), # noqa
    ('scnet50', '0547', '18741240886d8e260c228027f3ac44fc1c741f90', 'v0.0.493', 'SCNet-50', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('scnet101', '0484', '13801569a6e07724ebc998d3face11c9b867288b', 'v0.0.507', 'SCNet-101', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('scneta50', '0468', 'eb3c25d6c9c8b6c0815a724d798b9b5a2b27ce34', 'v0.0.472', 'SCNet(A)-50', '', 'in1k', 224, 0.875, 200, '[MCG-NKU/SCNet]'), # noqa
    ('regnetx002', '1066', 'e389d6ce5846b65a5859152243d821308252e202', 'v0.0.475', 'RegNetX-200MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx004', '0866', '9584cc0b8e461f624b3050a59bb36b15e04df980', 'v0.0.479', 'RegNetX-400MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx006', '0791', '30ca597ae0506cb588a7fd8d2fecc4be8402b0cf', 'v0.0.482', 'RegNetX-600MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx008', '0740', '157abf5e7c9244a482bf7655e75bfaea143b4d61', 'v0.0.482', 'RegNetX-800MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx016', '0637', '6de8a97b67a34be6e9acc234261f051da1b9444a', 'v0.0.486', 'RegNetX-1.6GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx032', '0592', '75dc82ab5cbc1b715444b8336b5178580bd6d7d9', 'v0.0.492', 'RegNetX-3.2GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx040', '0488', 'b891108c3dd4594ae0d6ecb91ad7be3d2d96878d', 'v0.0.495', 'RegNetX-4.0GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx064', '0468', 'bea758f904ea74e88b85040221f024c8553cf8f8', 'v0.0.535', 'RegNetX-6.4GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx080', '0486', '1d94db030638ab1dd01c644be700a14e5d05ca74', 'v0.0.515', 'RegNetX-8.0GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx120', '0532', 'a93ee3a7abdd3b6b1d117861d02fc7a344185458', 'v0.0.542', 'RegNetX-12GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx160', '0477', 'bd9f3534c727d3e69c410b1909253cce4815385e', 'v0.0.532', 'RegNetX-16GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnetx320', '0413', '34bc3cd236481d9b96d3405f58855c7582270583', 'v0.0.548', 'RegNetX-32GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety002', '0980', '57f04168f284797b799d624d906f5d38dcf23177', 'v0.0.476', 'RegNetY-200MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety004', '0769', '8c36573f17d3ef2ab8770be2593e94d714b035d7', 'v0.0.481', 'RegNetY-400MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety006', '0712', 'd6401a374a2c35ed1b2ac29a885438834c38cd0a', 'v0.0.483', 'RegNetY-600MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety008', '0660', 'ed298c233ef1ce2e3f82a6d23be1eebd43afdd75', 'v0.0.483', 'RegNetY-800MF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety016', '0581', 'b45eccd6d1a80dc6e5608abd89c79db7547f2735', 'v0.0.486', 'RegNetY-1.6GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety032', '0404', 'cb3314864b68dfd2e0037928a3b635c81f86ccb2', 'v0.0.473', 'RegNetY-3.2GF', '', 'in1k', 224, 0.875, 200, '[rwightman/pyt...models]'), # noqa
    ('regnety040', '0470', '052d76810aca2267e217a219d600299acc171c40', 'v0.0.494', 'RegNetY-4.0GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety064', '0456', 'bff39135d55313cb424adeb8bb4b22db7fea09ba', 'v0.0.513', 'RegNetY-6.4GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety080', '0448', 'c084bf6a0ee2f7722396622fb7865ee0c19b7244', 'v0.0.516', 'RegNetY-8.0GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety120', '0442', 'bf25956032eb6d98134a8e8b0e4640324cc92e59', 'v0.0.526', 'RegNetY-12GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety160', '0444', 'e7e05d91c588a308e1676163fd3ed914b61ab12e', 'v0.0.527', 'RegNetY-16GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('regnety320', '0383', 'a8b16205af3911b14f2fb3ca7cf55529a94fa52f', 'v0.0.550', 'RegNetY-32GF', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('pyramidnet101_a360', '0543', '7f1747f84b83b504ece3eb2bc3924d36343358ad', 'v0.0.507', 'PyramidNet-101 (a=360)', '1610.02915', 'in1k', 224, 0.875, 200, ''), # noqa
    ('diracnet18v2', '1170', 'e06737707a1f5a5c7fe4e57da92ed890b034cb9a', 'v0.0.111', 'DiracNetV2-18', '1706.00388', 'in1k', 224, 0.875, 200, '[szagoruyko/diracnets]'), # noqa
    ('diracnet34v2', '0993', 'a6a661c0c3e96af320e5b9bf65a6c8e5e498a474', 'v0.0.111', 'DiracNetV2-34', '1706.00388', 'in1k', 224, 0.875, 200, '[szagoruyko/diracnets]'), # noqa
    ('densenet121', '0704', 'cf90d1394d197fde953f57576403950345bd0a66', 'v0.0.314', 'DenseNet-121', '1608.06993', 'in1k', 224, 0.875, 200, ''), # noqa
    ('densenet161', '0606', 'da489277afe7f53048ec15bed7919486e22f1afa', 'v0.0.432', 'DenseNet-161', '1608.06993', 'in1k', 224, 0.875, 200, ''), # noqa
    ('densenet169', '0629', '44974a17309bb378e97c8f70f96f961ffbf9458d', 'v0.0.406', 'DenseNet-169', '1608.06993', 'in1k', 224, 0.875, 200, ''), # noqa
    ('densenet201', '0612', '6adc8625a4afa53e335272bab01b4908a0ca3f00', 'v0.0.426', 'DenseNet-201', '1608.06993', 'in1k', 224, 0.875, 200, ''), # noqa
    ('condensenet74_c4_g4', '0828', '5ba550494cae7081d12c14b02b2a02365539d377', 'v0.0.4', 'CondenseNet-74 (C=G=4)', '1711.09224', 'in1k', 224, 0.875, 200, '[ShichenLiu/CondenseNet]'), # noqa
    ('condensenet74_c8_g8', '1006', '3574d874fefc3307f241690bad51f20e61be1542', 'v0.0.4', 'CondenseNet-74 (C=G=8)', '1711.09224', 'in1k', 224, 0.875, 200, '[ShichenLiu/CondenseNet]'), # noqa
    ('peleenet', '1004', '5107a95d09d062cb152986169aa5b6f8f08afa47', 'v0.0.496', 'PeleeNet', '1804.06882', 'in1k', 224, 0.875, 200, ''), # noqa
    ('wrn50_2', '0626', '1e67b96cbfabe9a3717a8257ac8bf9d6ebc9d2cf', 'v0.0.520', 'WRN-50-2', '1605.07146', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnc26', '0723', 'e7306483781db61f71302eda6769d7d9fd126bf6', 'v0.0.508', 'DRN-C-26', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnc42', '0628', '8817241f62263c6375ff3c17a9d34f42067a114d', 'v0.0.556', 'DRN-C-42', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnc58', '0527', '3f74be98f80db3273ed764ded5bcb5d8bdf0b907', 'v0.0.559', 'DRN-C-58', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnd22', '0758', '02cb44bdea9b05e988e65576f79f5f5c133f2664', 'v0.0.498', 'DRN-D-22', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnd38', '0643', '496f648b8b8427050ad3327077f9a9b7a07fbcc6', 'v0.0.552', 'DRN-D-38', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnd54', '0517', 'caa3c85dbdb39397f049da649d196b15704427b3', 'v0.0.554', 'DRN-D-54', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('drnd105', '0501', '8dc6aa76c16cb1964929adf53183d1e0324ae051', 'v0.0.564', 'DRN-D-105', '1705.09914', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dpn68', '0679', 'a33c98c783cbf93cca4cc9ce1584da50a6b12077', 'v0.0.310', 'DPN-68', '1707.01629', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dpn98', '0430', '50ff8ef6cc0a11461dfd7168c291e2fce4382d24', 'v0.0.540', 'DPN-98', '1707.01629', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dpn131', '0503', '1765c5eec6e62bfe03cd25e1b31225b827cc9636', 'v0.0.534', 'DPN-131', '1707.01629', 'in1k', 224, 0.875, 200, ''), # noqa
    ('darknet_tiny', '1784', '4561e1ada619e33520d1f765b3321f7f8ea6196b', 'v0.0.69', 'DarkNet Tiny', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('darknet_ref', '1718', '034595b49113ee23de72e36f7d8a3dbb594615f6', 'v0.0.64', 'DarkNet Ref', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('darknet53', '0558', '8be575a04c1789c16b7fa6835919461bb5b174d1', 'v0.0.501', 'DarkNet-53', '1804.02767', 'in1k', 224, 0.875, 200, ''), # noqa
    ('irevnet301', '0752', 'd378865f937472907a78b9832c46ec7fe8893fdc', 'v0.0.564', 'i-RevNet-301', '1802.07088', 'in1k', 224, 0.875, 200, '[jhjacobsen/pytorch-i-revnet]'), # noqa
    ('bagnet9', '2576', '36d935e1ec250208f585a1a53b65c79ddc11d7cd', 'v0.0.553', 'BagNet-9', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('bagnet17', '1551', '04da269cb4db817fa8750c2605e4fe7e6c0250ed', 'v0.0.558', 'BagNet-17', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('bagnet33', '1070', '7d16b6f4190ed5ce3f4f26373d60b51cdc5d4cd9', 'v0.0.561', 'BagNet-33', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla34', '0724', '649c67e61942283abe7f6a798fb9fcae346e5a5d', 'v0.0.486', 'DLA-34', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla46c', '1323', 'efcd363642a4b479892f47edae7440f0eea05edb', 'v0.0.282', 'DLA-46-C', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla46xc', '1269', '00d3754ad0ff22636bb1f4b4fb8baebf4751a1ee', 'v0.0.293', 'DLA-X-46-C', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla60', '0570', 'f8ea80aa6155591c1082b3caaa0815d164ae2259', 'v0.0.494', 'DLA-60', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla60x', '0575', 'fae6dc6d434d4cf0b52e5d4b3da13b5230d08c02', 'v0.0.493', 'DLA-X-60', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla60xc', '1091', '0f6381f335e5bbb4c69b360be61a4a08e5c7a9de', 'v0.0.289', 'DLA-X-60-C', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla102', '0537', 'fdabf0c31bd2e359ee9a8374b6a42d1396093cf1', 'v0.0.505', 'DLA-102', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla102x', '0488', 'b1727759bba2394891f74481ceb91a603f0b4c8e', 'v0.0.528', 'DLA-X-102', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla102x2', '0437', '8922a4575b1e4bdd30acd084a5b6ec1f972ec82d', 'v0.0.542', 'DLA-X2-102', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('dla169', '0471', '402f95f01800539345428ec17e32d033886452c1', 'v0.0.539', 'DLA-169', '1707.06484', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fishnet150', '0475', '93e26daaf570bc92b58f7421ab28c22ca405ad93', 'v0.0.502', 'FishNet-150', '', 'in1k', 224, 0.875, 200, ''), # noqa
    ('espnetv2_wd2', '2015', 'd234781f81e5d1b5ae6070fc851e3f7bb860b9fd', 'v0.0.238', 'ESPNetv2 x0.5', '1811.11431', 'in1k', 224, 0.875, 200, '[sacmehta/ESPNetv2]'), # noqa
    ('espnetv2_w1', '1345', '550d54229d7fd8f7c090601c2123ab3ca106393b', 'v0.0.238', 'ESPNetv2 x1.0', '1811.11431', 'in1k', 224, 0.875, 200, '[sacmehta/ESPNetv2]'), # noqa
    ('espnetv2_w5d4', '1218', '85d97b2b1c9ebb176f634949ef5ca6d7fe70f09c', 'v0.0.238', 'ESPNetv2 x1.25', '1811.11431', 'in1k', 224, 0.875, 200, '[sacmehta/ESPNetv2]'), # noqa
    ('espnetv2_w3d2', '1108', '40da2416923f5a79ae1001d2bbc9c7cbdf8c8d67', 'v0.0.566', 'ESPNetv2 x1.5', '1811.11431', 'in1k', 224, 0.875, 200, ''), # noqa
    ('espnetv2_w2', '0961', '13ba0f7200eb745bacdf692905fde711236448ef', 'v0.0.238', 'ESPNetv2 x2.0', '1811.11431', 'in1k', 224, 0.875, 200, '[sacmehta/ESPNetv2]'), # noqa
    ('dicenet_wd5', '2938', '2d721aa1795c7eb57dfabf73d17a416be64ae7fa', 'v0.0.497', 'DiCENet x0.2', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_wd2', '2258', '4f35289a84f31aece5747d01fa54779f7d9dd1db', 'v0.0.497', 'DiCENet x0.5', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w3d4', '1574', '29d7d14f444f7cefa4d098f24bd171ad23249b1c', 'v0.0.497', 'DiCENet x0.75', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w1', '1325', 'd3648c4c3f0376c3b02ee1fdfdf683462317c77f', 'v0.0.497', 'DiCENet x1.0', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w5d4', '1240', '8c4dd6f6be26e3c29012377e4b1bd88d5089977a', 'v0.0.497', 'DiCENet x1.25', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w3d2', '1123', 'e5c5db64a407bd9cd6567301b2d6477ea614dc87', 'v0.0.497', 'DiCENet x1.5', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w7d8', '1062', '8b599d4697ce5f2c95f26104796c3089cff5f6c6', 'v0.0.497', 'DiCENet x1.75', '1906.03516', 'in1k', 224, 0.875, 200, '[sacmehta/EdgeNets]'), # noqa
    ('dicenet_w2', '0945', '5c48ba97187df4bbc9ca30071facd1728f8808ad', 'v0.0.569', 'DiCENet x2.0', '1906.03516', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnet_w18_small_v1', '0901', '300230646c0796b7ba20954a9245803ecac4cdf0', 'v0.0.492', 'HRNet-W18 Small V1', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnet_w18_small_v2', '0618', 'ef7b1fe4e206cadaad6a59faef1e0bc6104da825', 'v0.0.499', 'HRNet-W18 Small V2', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w18', '0512', '9d2b7fbfb4a0efd878172ec8f81d517ba347a6a2', 'v0.0.508', 'HRNetV2-W18', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w30', '0521', '73d7e48d2006d86c50d03ed24c92277b77fb5146', 'v0.0.525', 'HRNetV2-W30', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w32', '0506', '4aaf8a212b65f4b97f572b6fbbda4fa63ad0954a', 'v0.0.528', 'HRNetV2-W32', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w40', '0493', '6f6d22d3e778c9f80d83d73ecf114fa68784ca6f', 'v0.0.534', 'HRNetV2-W40', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w44', '0501', 'ec40e5455147db5a03aab423cac75b816030976d', 'v0.0.541', 'HRNetV2-W44', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w48', '0500', '0554b840b6f3f87403433595d946170d91d15334', 'v0.0.541', 'HRNetV2-W48', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hrnetv2_w64', '0487', '108e78b1f2eedcf705bcce55e286969861f67cf8', 'v0.0.543', 'HRNetV2-W64', '1908.07919', 'in1k', 224, 0.875, 200, ''), # noqa
    ('vovnet27s', '0997', 'b7a5bf677bd3431bbed44b439fde7a01d699ace1', 'v0.0.551', 'VoVNet-27-slim', '1904.09730', 'in1k', 224, 0.875, 200, ''), # noqa
    ('vovnet39', '0564', '63bfa613870b37bd4fb5b71412e7875392aa4f66', 'v0.0.493', 'VoVNet-39', '1904.09730', 'in1k', 224, 0.875, 200, ''), # noqa
    ('vovnet57', '0518', 'c080e47169a176043f298b1e909ddd8776d5aa76', 'v0.0.505', 'VoVNet-57', '1904.09730', 'in1k', 224, 0.875, 200, ''), # noqa
    ('selecsls42b', '0611', 'acff1e8b36428719059eec4b60c7b2c045a54d8e', 'v0.0.493', 'SelecSLS-42b', '1907.00837', 'in1k', 224, 0.875, 200, ''), # noqa
    ('selecsls60', '0529', '1e1b05bc1432fe7c4a8bac26278c16f7486a498f', 'v0.0.496', 'SelecSLS-60', '1907.00837', 'in1k', 224, 0.875, 200, ''), # noqa
    ('selecsls60b', '0559', 'a0e7b4effe66dc58c76d22a7647dfce7f3639c33', 'v0.0.495', 'SelecSLS-60b', '1907.00837', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hardnet39ds', '0881', 'ea47fc939a130a70c5fa3326c3af6ba049a99f92', 'v0.0.485', 'HarDNet-39DS', '1909.00948', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hardnet68ds', '0756', 'e0da07508c1eb92fee49df42243836892fe2f4c8', 'v0.0.487', 'HarDNet-68DS', '1909.00948', 'in1k', 224, 0.875, 200, ''), # noqa
    ('hardnet68', '0699', '2e207f79a1995f5f30d5b9fca3391bb8e7b8594f', 'v0.0.435', 'HarDNet-68', '1909.00948', 'in1k', 224, 0.875, 200, '[PingoLH/Pytorch-HarDNet]'), # noqa
    ('hardnet85', '0586', '39d80e9361844e8ba02b08e93a7440eac14d2eda', 'v0.0.495', 'HarDNet-85', '1909.00948', 'in1k', 224, 0.875, 200, ''), # noqa
    ('squeezenet_v1_0', '1766', 'afdbcf1aef39237300656d2c5a7dba19230e29fc', 'v0.0.128', 'SqueezeNet v1.0', '1602.07360', 'in1k', 224, 0.875, 200, ''), # noqa
    ('squeezenet_v1_1', '1772', '25b77bc39e35612abbe7c2344d2c3e1e6756c2f8', 'v0.0.88', 'SqueezeNet v1.1', '1602.07360', 'in1k', 224, 0.875, 200, ''), # noqa
    ('squeezeresnet_v1_0', '1809', '25bfc02edeffb279010242614e7d73bbeacc0170', 'v0.0.178', 'SqueezeResNet v1.0', '1602.07360', 'in1k', 224, 0.875, 200, ''), # noqa
    ('squeezeresnet_v1_1', '1821', 'c27ed88f1b19eb233d3925efc71c71d25e4c434e', 'v0.0.70', 'SqueezeResNet v1.1', '1602.07360', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23_w1', '1906', '97b74e0c4d6bf9fc939771d94b2f6dd97de34024', 'v0.0.171', '1.0-SqNxt-23', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23v5_w1', '1785', '2fe3ad67d73313193a77690b10c17cbceef92340', 'v0.0.172', '1.0-SqNxt-23v5', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23_w3d2', '1350', 'c2f21bce669dbe50fba544bcc39bc1302f63e1e8', 'v0.0.210', '1.5-SqNxt-23', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23v5_w3d2', '1301', 'c244844ba2f02dadd350dddd74e21360b452f9dd', 'v0.0.212', '1.5-SqNxt-23v5', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23_w2', '1100', 'b9bb7302824f89f16e078f0a506e3a8c0ad9c74e', 'v0.0.240', '2.0-SqNxt-23', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('sqnxt23v5_w2', '1066', '229b0d3de06197e399eeebf42dc826b78f0aba86', 'v0.0.216', '2.0-SqNxt-23v5', '1803.10615', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g1_wd4', '3729', '47dbd0f279da6d3056079bb79ad39cabbb3b9415', 'v0.0.134', 'ShuffleNet x0.25 (g=1)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g3_wd4', '3653', '6abdd65e087e71f80345415cdf7ada6ed2762d60', 'v0.0.135', 'ShuffleNet x0.25 (g=3)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g1_wd2', '2261', 'dae4bdadd7d48bee791dff2a08cd697cff0e9320', 'v0.0.174', 'ShuffleNet x0.5 (g=1)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g3_wd2', '2080', 'ccaacfc8d9ac112c6143269df6e258fd55b662a7', 'v0.0.167', 'ShuffleNet x0.5 (g=3)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g1_w3d4', '1711', '161cd24aa0b2e2afadafa69b44a28af222f2ec7a', 'v0.0.218', 'ShuffleNet x0.75 (g=1)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g3_w3d4', '1650', '3f3b0aef0ce3174c78ff42cf6910c6e34540fc41', 'v0.0.219', 'ShuffleNet x0.75 (g=3)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g1_w1', '1389', '4cfb65a30761fe548e0b5afbb5d89793ec41e4e9', 'v0.0.223', 'ShuffleNet x1.0 (g=1)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g2_w1', '1363', '07256203e217a7b31f1c69a5bd38a6674bce75bc', 'v0.0.241', 'ShuffleNet x1.0 (g=2)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g3_w1', '1348', 'ce54f64ecff87556a4303380f46abaaf649eb308', 'v0.0.244', 'ShuffleNet x1.0 (g=3)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g4_w1', '1335', 'e2415f8270a4b6cbfe7dc97044d497edbc898577', 'v0.0.245', 'ShuffleNet x1.0 (g=4)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenet_g8_w1', '1342', '9a979b365424addba75c559a61a77ac7154b26eb', 'v0.0.250', 'ShuffleNet x1.0 (g=8)', '1707.01083', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2_wd2', '1865', '9c22238b5fa9c09541564e8ed7f357a5f7e8cd7c', 'v0.0.90', 'ShuffleNetV2 x0.5', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2_w1', '1163', 'c71dfb7a814c8d8ef704bdbd80995e9ea49ff4ff', 'v0.0.133', 'ShuffleNetV2 x1.0', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2_w3d2', '0942', '26a9230405d956643dcd563a5a383844c49b5907', 'v0.0.288', 'ShuffleNetV2 x1.5', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2_w2', '0845', '337255f6ad40a93c2f23fc593bad4b2755a327fa', 'v0.0.301', 'ShuffleNetV2 x2.0', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2b_wd2', '1822', '01d18d6fa1a6136f605a4277f47c9a757f9ede3b', 'v0.0.157', 'ShuffleNetV2b x0.5', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2b_w1', '1125', '6a5d3dc446e6a00cf60fe8aa2f4139d74d766305', 'v0.0.161', 'ShuffleNetV2b x1.0', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2b_w3d2', '0911', 'f2106fee0748d7f0d40db16b228782b6d7636737', 'v0.0.203', 'ShuffleNetV2b x1.5', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('shufflenetv2b_w2', '0834', 'cb36b92ca4ca3bee470b739021d01177e0601c5f', 'v0.0.242', 'ShuffleNetV2b x2.0', '1807.11164', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet108_8x1_g3', '2076', '6acc82e46dfc1ce0dd8c59668aed4a464c8cbdb5', 'v0.0.89', '108-MENet-8x1 (g=3)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet128_8x1_g4', '1959', '48fa80fc363adb88ff580788faa8053c9d7507f3', 'v0.0.103', '128-MENet-8x1 (g=4)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet160_8x1_g8', '2084', '0f4fce43b4234c5bca5dd76450b698c2d4daae65', 'v0.0.154', '160-MENet-8x1 (g=8)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet228_12x1_g3', '1316', '5b670c42031d0078e2ae981829358d7c1b92ee30', 'v0.0.131', '228-MENet-12x1 (g=3)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet256_12x1_g4', '1252', '14c6c86df96435c693eb7d0fcd8d3bf4079dd621', 'v0.0.152', '256-MENet-12x1 (g=4)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet348_12x1_g3', '0958', 'ad50f635a1f7b799a19a0a9c71aa9939db8ffe77', 'v0.0.173', '348-MENet-12x1 (g=3)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet352_12x1_g8', '1200', '4ee200c5c98c64a2503cea82ebf62d1d3c07fb91', 'v0.0.198', '352-MENet-12x1 (g=8)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('menet456_24x1_g3', '0799', '826c002244f1cdc945a95302b1ce5c66d949db74', 'v0.0.237', '456-MENet-24x1 (g=3)', '1803.09127', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenet_wd4', '2249', '1ad5e8fe8674cdf7ffda8450095eb96d227397e0', 'v0.0.62', 'MobileNet x0.25', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenet_wd2', '1355', '41a21242c95050407df876cfa44bb5d3676aa751', 'v0.0.156', 'MobileNet x0.5', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenet_w3d4', '1076', 'd801bcaea83885b16a0306b8b77fe314bbc585c3', 'v0.0.130', 'MobileNet x0.75', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenet_w1', '0895', '7e1d739f0fd4b95c16eef077c5dc0a5bb1da8ad5', 'v0.0.155', 'MobileNet x1.0', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetb_wd4', '2201', '428da928e43ecc387763bea8faa8ccc51244cb0e', 'v0.0.481', 'MobileNet(B) x0.25', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetb_wd2', '1310', 'd1549ead8d09cc81f8a1542952a8a30fa937caee', 'v0.0.480', 'MobileNet(B) x0.5', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetb_w3d4', '1037', '8d732bc9e6f5326ce1f31ce836623ac0970f1e16', 'v0.0.481', 'MobileNet(B) x0.75', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetb_w1', '0816', '107275a1173b201634cca077dd126a550bc99dae', 'v0.0.489', 'MobileNet(B) x1.0', '1704.04861', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fdmobilenet_wd4', '3098', '2b22b709a05d7ca6e43acc6f3a9f27d0eb2e01cd', 'v0.0.177', 'FD-MobileNet x0.25', '1802.03750', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fdmobilenet_wd2', '2015', '414dbeedb2f829dcd8f94cd7fef10aae6829f06f', 'v0.0.83', 'FD-MobileNet x0.5', '1802.03750', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fdmobilenet_w3d4', '1641', '5561d58aa8889d8d93f2062a2af4e4b35ad7e769', 'v0.0.159', 'FD-MobileNet x0.75', '1802.03750', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fdmobilenet_w1', '1338', '9d026c04112de9f40e15fa40457d77941443c327', 'v0.0.162', 'FD-MobileNet x1.0', '1802.03750', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2_wd4', '2451', '05e1e3a286b27c17ea11928783c4cd48b1e7a9b2', 'v0.0.137', 'MobileNetV2 x0.25', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2_wd2', '1493', 'b82d79f6730eac625e6b55b0618bff8f7a1ed86d', 'v0.0.170', 'MobileNetV2 x0.5', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2_w3d4', '1082', '8656de5a8d90b29779c35c5ce521267c841fd717', 'v0.0.230', 'MobileNetV2 x0.75', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2_w1', '0887', '13a021bca5b679b76156829743f7182da42e8bb6', 'v0.0.213', 'MobileNetV2 x1.0', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2b_wd4', '2368', '399f95e6cb3c15d57516c1d328201a0af3de5882', 'v0.0.483', 'MobileNetV2b x0.25', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2b_wd2', '1408', 'f820ea858dd7be1bbe0ca4639581911d98183cde', 'v0.0.486', 'MobileNetV2b x0.5', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2b_w3d4', '1105', '0924efc9ca677d2bccfe3987b1e0e1e47afe69e8', 'v0.0.483', 'MobileNetV2b x0.75', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv2b_w1', '0912', '2bcab1d0cd3be4eb270d65e390ff7c9776e38a04', 'v0.0.483', 'MobileNetV2b x1.0', '1801.04381', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mobilenetv3_large_w1', '0744', 'b59cae6daf1edc5f412fcd794693bb22dc3d4573', 'v0.0.491', 'MobileNetV3 L/224/1.0', '1905.02244', 'in1k', 224, 0.875, 200, ''), # noqa
    ('igcv3_wd4', '2871', 'c9f28301391601e5e8ae93139431a9e0d467317c', 'v0.0.142', 'IGCV3 x0.25', '1806.00178', 'in1k', 224, 0.875, 200, ''), # noqa
    ('igcv3_wd2', '1732', '8c504f443283d8a32787275b23771082fcaab61b', 'v0.0.132', 'IGCV3 x0.5', '1806.00178', 'in1k', 224, 0.875, 200, ''), # noqa
    ('igcv3_w3d4', '1140', '63f43cf8d334111d55d06f2f9bf7e1e4871d162c', 'v0.0.207', 'IGCV3 x0.75', '1806.00178', 'in1k', 224, 0.875, 200, ''), # noqa
    ('igcv3_w1', '0920', '12385791681f09adb3a08926c95471f332f538b6', 'v0.0.243', 'IGCV3 x1.0', '1806.00178', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mnasnet_b1', '0740', '7025b43c5c0251980ada2c591dd3e7e28d856e79', 'v0.0.493', 'MnasNet-B1', '1807.11626', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mnasnet_a1', '0720', 'e155916ce24d06e273e8f90540707bcb7e1f9eab', 'v0.0.486', 'MnasNet-A1', '1807.11626', 'in1k', 224, 0.875, 200, ''), # noqa
    ('darts', '0775', 'fc3171c5b89b270fc7673dbbb5047f5879d7e774', 'v0.0.485', 'DARTS', '1806.09055', 'in1k', 224, 0.875, 200, '[quark0/darts]'), # noqa
    ('proxylessnas_cpu', '0761', 'fe9572b11899395acbeef9374827dcc04e103ce3', 'v0.0.304', 'ProxylessNAS CPU', '1812.00332', 'in1k', 224, 0.875, 200, '[MIT-HAN-LAB/ProxylessNAS]'), # noqa
    ('proxylessnas_gpu', '0745', 'acca5941c454d896410060434b8f983d2db80727', 'v0.0.333', 'ProxylessNAS GPU', '1812.00332', 'in1k', 224, 0.875, 200, ''), # noqa
    ('proxylessnas_mobile', '0780', '639a90c27de088402db76b09e410326795b6fbdd', 'v0.0.304', 'ProxylessNAS Mobile', '1812.00332', 'in1k', 224, 0.875, 200, '[MIT-HAN-LAB/ProxylessNAS]'), # noqa
    ('proxylessnas_mobile14', '0662', '0c0ad983f4fb88470d0f3e557d0b23f15e16624f', 'v0.0.331', 'ProxylessNAS Mob-14', '1812.00332', 'in1k', 224, 0.875, 200, ''), # noqa
    ('fbnet_cb', '0762', '2edb61f8e4b5c45d958d0e57beff41fbfacd6061', 'v0.0.415', 'FBNet-Cb', '1812.03443', 'in1k', 224, 0.875, 200, '[rwightman/pyt...models]'), # noqa
    ('xception', '0516', 'a75b50eceb5fdfb1e1bfaada6820a448ce40e593', 'v0.0.544', 'Xception', '1610.02357', 'in1k', 299, 0.875, 200, ''), # noqa
    ('inceptionv3', '0533', '025fb71c673f8e325f4c24f25cbd4185540cca72', 'v0.0.552', 'InceptionV3', '1512.00567', 'in1k', 299, 0.875, 200, ''), # noqa
    ('inceptionv4', '0488', '4ae4f331a5ff649e39626fc49cd5c24b8159cd8c', 'v0.0.543', 'InceptionV4', '1602.07261', 'in1k', 299, 0.875, 200, ''), # noqa
    ('inceptionresnetv1', '0480', 'f8b3e9e369ff38e28b4ae4def273ef78741e2e28', 'v0.0.552', 'InceptionResNetV1', '1602.07261', 'in1k', 299, 0.875, 200, ''), # noqa
    ('inceptionresnetv2', '0474', '19f51781f8a454803207e319289f404d50e252cb', 'v0.0.547', 'InceptionResNetV2', '1602.07261', 'in1k', 299, 0.875, 200, ''), # noqa
    ('polynet', '0452', '6a1b295dad3f261b48e845f1b283e4eef3ab5a0b', 'v0.0.96', 'PolyNet', '1611.05725', 'in1k', 331, 0.875, 200, '[Cadene/pretrained...pytorch]'), # noqa
    ('nasnet_4a1056', '0803', '44f5ecbe03da2cd21803c555366121e29b207907', 'v0.0.495', 'NASNet-A 4@1056', '1707.07012', 'in1k', 224, 0.875, 200, ''), # noqa
    ('nasnet_6a4032', '0421', 'f354d28f4acdde399e081260c3f46152eca5d27e', 'v0.0.101', 'NASNet-A 6@4032', '1707.07012', 'in1k', 331, 0.875, 200, '[Cadene/pretrained...pytorch]'), # noqa
    ('pnasnet5large', '0428', '65de46ebd049e494c13958d5671aba5abf803ff3', 'v0.0.114', 'PNASNet-5-Large', '1712.00559', 'in1k', 331, 0.875, 200, '[Cadene/pretrained...pytorch]'), # noqa
    ('spnasnet', '0798', 'a25ca15768d91c0c09b473352bf54a2b954257d4', 'v0.0.490', 'SPNASNet', '1904.02877', 'in1k', 224, 0.875, 200, ''), # noqa
    ('efficientnet_b0', '0752', '0e3861300b8f1d1d0fb1bd15f0e06bba1ad6309b', 'v0.0.364', 'EfficientNet-B0', '1905.11946', 'in1k', 224, 0.875, 200, ''), # noqa
    ('efficientnet_b1', '0638', 'ac77bcd722dc4f3edfa24b9fb7b8f9cece3d85ab', 'v0.0.376', 'EfficientNet-B1', '1905.11946', 'in1k', 240, 0.882, 200, ''), # noqa
    ('efficientnet_b0b', '0702', 'ecf61b9b50666a6b444a9d789a5ff1087c65d0d8', 'v0.0.403', 'EfficientNet-B0b', '1905.11946', 'in1k', 224, 0.875, 200, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b1b', '0594', '614e81663902850a738fa6c862fe406ecf205f73', 'v0.0.403', 'EfficientNet-B1b', '1905.11946', 'in1k', 240, 0.882, 200, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b2b', '0527', '531f10e6898778b7c3a82c2c149f8b3e6393a892', 'v0.0.403', 'EfficientNet-B2b', '1905.11946', 'in1k', 260, 0.890, 100, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b3b', '0445', '3c5fbba8c86121d4bc3bbc169804f24dd4c3d1f6', 'v0.0.403', 'EfficientNet-B3b', '1905.11946', 'in1k', 300, 0.904, 90, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b4b', '0389', '6305bfe688b261f0d4fef6829f520d5c98c46301', 'v0.0.403', 'EfficientNet-B4b', '1905.11946', 'in1k', 380, 0.922, 80, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b5b', '0337', 'e1c2ffcf710cbd3c53b9c08723282a370906731c', 'v0.0.403', 'EfficientNet-B5b', '1905.11946', 'in1k', 456, 0.934, 70, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b6b', '0323', 'e5c1d7c35fcff5fac07921a7696f7c04aba84012', 'v0.0.403', 'EfficientNet-B6b', '1905.11946', 'in1k', 528, 0.942, 60, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b7b', '0322', 'b9c5965a1e2572aaa772e20e8a2e3af7b4bee9a6', 'v0.0.403', 'EfficientNet-B7b', '1905.11946', 'in1k', 600, 0.949, 50, '[rwightman/pyt...models]'), # noqa
    ('efficientnet_b0c', '0675', '21778c6e3b5a1b9b08b60c3e69401ce7e12bead4', 'v0.0.433', 'EfficientNet-B0с', '1905.11946', 'in1k', 224, 0.875, 200, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b1c', '0569', '239ed6a412530f60f810b29807da70c8ca63d8cc', 'v0.0.433', 'EfficientNet-B1с', '1905.11946', 'in1k', 240, 0.882, 200, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b2c', '0503', 'be48d3d79f25a13a807b137d8a7ced41e8aab2bf', 'v0.0.433', 'EfficientNet-B2с', '1905.11946', 'in1k', 260, 0.890, 100, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b3c', '0442', 'ea7080aba3fc20ac25c3c925bfadf1e8c1e7df4d', 'v0.0.433', 'EfficientNet-B3с', '1905.11946', 'in1k', 300, 0.904, 90, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b4c', '0369', '5954cc05cfba3b0c8ee488b4488354fc0cef6623', 'v0.0.433', 'EfficientNet-B4с', '1905.11946', 'in1k', 380, 0.922, 80, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b5c', '0310', '589fefc6de5d93b54698b5b03f1e05637f9d0cb6', 'v0.0.433', 'EfficientNet-B5с', '1905.11946', 'in1k', 456, 0.934, 70, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b6c', '0296', '546e61da82bec69e3db5870b8df977e4615f7b32', 'v0.0.433', 'EfficientNet-B6с', '1905.11946', 'in1k', 528, 0.942, 60, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b7c', '0288', '13d683f2ca56c1007acd9ad0be450f45efeec828', 'v0.0.433', 'EfficientNet-B7с', '1905.11946', 'in1k', 600, 0.949, 50, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_b8c', '0276', 'a9973d66d599c4e83029577842c039a20799f2c9', 'v0.0.433', 'EfficientNet-B8с', '1905.11946', 'in1k', 672, 0.954, 50, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_edge_small_b', '0640', 'e27c3444406ebddd86824e41a924c0b8188c4067', 'v0.0.434', 'EfficientNet-Edge-Small-b', '1905.11946', 'in1k', 224, 0.875, 200, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_edge_medium_b', '0563', '99fa34c7044281e521fb7cf4267763a5b03b7f1c', 'v0.0.434', 'EfficientNet-Edge-Medium-b', '1905.11946', 'in1k', 240, 0.882, 200, '[rwightman/pyt...models]*'), # noqa
    ('efficientnet_edge_large_b', '0491', 'd502326f9568f096491354a117f12562cf47e038', 'v0.0.434', 'EfficientNet-Edge-Large-b', '1905.11946', 'in1k', 300, 0.904, 90, '[rwightman/pyt...models]*'), # noqa
    ('mixnet_s', '0717', 'ab2c4e37062e7ea34a2cdd112f9354d4e67a0fef', 'v0.0.493', 'MixNet-S', '1907.09595', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mixnet_m', '0647', '4d90d345a38ba5041ac5cae2921e07d1eca083b2', 'v0.0.493', 'MixNet-M', '1907.09595', 'in1k', 224, 0.875, 200, ''), # noqa
    ('mixnet_l', '0571', 'c686ba17fc6bca5f30ac596b37a7f95f2d4b6f30', 'v0.0.500', 'MixNet-L', '1907.09595', 'in1k', 224, 0.875, 200, ''), # noqa
    ('resneta10', '1190', 'a066e5e07f13f8f2a67971931496d1c1ac09bbe1', 'v0.0.484', 'ResNet(A)-10', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resnetabc14b', '0990', 'bad51cb083aae58479112ad11a3fe9430346e185', 'v0.0.477', 'ResNet(A)-BC-14b', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resneta18', '0831', 'e9f206f480c46b489fbd300fa77db31d740c4f3b', 'v0.0.486', 'ResNet(A)-18', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resneta50b', '0556', '7cedbb3bd808c0644b4afe1d52e7dad6abd33516', 'v0.0.492', 'ResNet(A)-50b', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resneta101b', '0453', '0f342545d0ef4f215efc391fd24fa395b2573a1d', 'v0.0.532', 'ResNet(A)-101b', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resneta152b', '0441', 'c4b9bc9af946b25fd37de8cf4c58bdb0066dfeae', 'v0.0.524', 'ResNet(A)-152b', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resnetd50b', '0565', 'ec03d815c0f016c6517ed7b4b40126af46ceb8a4', 'v0.0.296', '', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resnetd101b', '0473', 'f851c920ec1fe4f729d339c933535d038bf2903c', 'v0.0.296', '', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('resnetd152b', '0482', '112e216da50eb20d52c509a28c97b05ef819cefe', 'v0.0.296', '', '', 'in1k', 0, 0.0, 0, ''), # noqa
    ('nin_cifar10', '0743', '795b082470b58c1aa94e2f861514b7914f6e2f58', 'v0.0.175', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('nin_cifar100', '2839', '627a11c064eb44c6451fe53e0becfc21a6d57d7f', 'v0.0.183', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('nin_svhn', '0376', '1205dc06a4847bece8159754033f325f75565c02', 'v0.0.270', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet20_cifar10', '0597', '9b0024ac4c2f374cde2c5052e0d0344a75871cdb', 'v0.0.163', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet20_cifar100', '2964', 'a5322afed92fa96cb7b3453106f73cf38e316151', 'v0.0.180', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet20_svhn', '0343', '8232e6e4c2c9fac1200386b68311c3bd56f483f5', 'v0.0.265', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet56_cifar10', '0452', '628c42a26fe347b84060136212e018df2bb35e0f', 'v0.0.163', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet56_cifar100', '2488', 'd65f53b10ad5d124698e728432844c65261c3107', 'v0.0.181', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet56_svhn', '0275', '6e08ed929b8f0ee649f75464f06b557089023290', 'v0.0.265', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet110_cifar10', '0369', '4d6ca1fc02eaeed724f4f596011e391528536049', 'v0.0.163', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet110_cifar100', '2280', 'd8d397a767db6d22af040223ec8ae342a088c3e5', 'v0.0.190', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet110_svhn', '0245', 'c971f0a38943d8a75386a60c835cc0843c2f6c1c', 'v0.0.265', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet164bn_cifar10', '0368', '74ae9f4bccb7fb6a8f3f603fdabe8d8632c46b2f', 'v0.0.179', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet164bn_cifar100', '2044', '8fa07b7264a075fa5add58f4c676b99a98fb1c89', 'v0.0.182', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet164bn_svhn', '0242', '549413723d787cf7e96903427a7a14fb3ea1a4c1', 'v0.0.267', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet272bn_cifar10', '0333', '84f28e0ca97eaeae0eb07e9f76054c1ba0c77c0e', 'v0.0.368', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet272bn_cifar100', '2007', 'a80d2b3ce14de6c90bf22d210d76ebd4a8c91928', 'v0.0.368', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet272bn_svhn', '0243', 'ab1d7da51f52cc6acb2e759736f2d58a77ce895e', 'v0.0.368', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet542bn_cifar10', '0343', '0fd36dd16587f49d33e0e36f1e8596d021a11439', 'v0.0.369', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet542bn_cifar100', '1932', 'a631d3ce5f12e145637a7b2faee663cddc94c354', 'v0.0.369', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet542bn_svhn', '0234', '04396c973121e356f2efda9a28c4e4086f1511b2', 'v0.0.369', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet1001_cifar10', '0328', '77a179e240808b7aa3534230d39b845a62413ca2', 'v0.0.201', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet1001_cifar100', '1979', '2728b558748f9c3e70db179afb6c62358020858b', 'v0.0.254', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet1001_svhn', '0241', '9e3d4bb55961db4c0f46a961b5323a4e03aea602', 'v0.0.408', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet1202_cifar10', '0353', '1d5a21290117903fb5fd6ba59f3f7e7da7c08836', 'v0.0.214', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet1202_cifar100', '2156', '86ecd091e5ac9677bf4518c644d08eb3e1d1708a', 'v0.0.410', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet20_cifar10', '0651', '76cec68d11de5b25be2ea5935681645b76195f1d', 'v0.0.164', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet20_cifar100', '3022', '3dbfa6a2b850572bccb28cc2477a0e46c24abcb8', 'v0.0.187', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet20_svhn', '0322', 'c3c00fed49c1d6d9deda6436d041c5788d549299', 'v0.0.269', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet56_cifar10', '0449', 'e9124fcf167d8ca50addef00c3afa4da9f828f29', 'v0.0.164', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet56_cifar100', '2505', 'ca90a2be6002cd378769b9d4e7c497dd883d31d9', 'v0.0.188', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet56_svhn', '0280', 'b51b41476710c0e2c941356ffe992ff883a3ee87', 'v0.0.269', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet110_cifar10', '0386', 'cc08946a2126a1224d1d2560a47cf766a763c52c', 'v0.0.164', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet110_cifar100', '2267', '3954e91581b7f3e5f689385d15f618fe16e995af', 'v0.0.191', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet110_svhn', '0279', 'aa49e0a3c4a918e227ca2d5a5608704f026134c3', 'v0.0.269', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet164bn_cifar10', '0364', '429012d412e82df7961fa071f97c938530e1b005', 'v0.0.196', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet164bn_cifar100', '2018', 'a8e67ca6e14f88b009d618b0e9b554312d862174', 'v0.0.192', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet164bn_svhn', '0258', '94d42de440d5f057a38f4c8cdbdb24acfee3981c', 'v0.0.269', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet272bn_cifar10', '0325', '1a6a016eb4e4a5549c1fcb89ed5af4c1e5715b72', 'v0.0.389', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet272bn_cifar100', '1963', '6fe0d2e24a60d12ab6b3d0e46065e2f14a46bc0b', 'v0.0.389', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet272bn_svhn', '0234', 'c04ef5c20a53f76824339fe75185d181be4bce61', 'v0.0.389', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet542bn_cifar10', '0314', '66fd6f2033dff08428e586bcce3e5151ed4274f9', 'v0.0.391', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet542bn_cifar100', '1871', '07f1fb258207d295789981519e8dab892fc08f8d', 'v0.0.391', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet542bn_svhn', '0236', '6bdf92368873ce1288526dc405f15e689a1d3117', 'v0.0.391', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet1001_cifar10', '0265', '9fedfe5fd643e7355f1062a6db68da310c8962be', 'v0.0.209', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet1001_cifar100', '1841', '88f14ed9df1573e98b0ec2a07009a15066855fda', 'v0.0.283', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('preresnet1202_cifar10', '0339', '6fc686b02191226f39e25a76fc5da26857f7acd9', 'v0.0.246', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_32x4d_cifar10', '0315', '30413525cd4466dbef759294eda9b702bc39648f', 'v0.0.169', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_32x4d_cifar100', '1950', '13ba13d92f6751022549a3b370ae86d3b13ae2d1', 'v0.0.200', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_32x4d_svhn', '0280', 'e85c5217944cdfafb0a538dd7cc817cffaada7c4', 'v0.0.275', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_16x64d_cifar10', '0241', '4133d3d04f9b10b132dcb959601d36f10123f8c2', 'v0.0.176', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_16x64d_cifar100', '1693', '05e9a7f113099a98b219cad622ecfad5517a3b54', 'v0.0.322', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext29_16x64d_svhn', '0268', '74332b714cd278bfca3f09dafe2a9d117510e9a4', 'v0.0.358', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_1x64d_cifar10', '0255', '070ccc35c2841b7715b9eb271197c9bb316f3093', 'v0.0.372', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_1x64d_cifar100', '1911', '114eb0f8a0d471487e819b8fd156c1286ef91b7a', 'v0.0.372', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_1x64d_svhn', '0235', 'ab0448469bbd7d476f8bed1bf86403304b028e7c', 'v0.0.372', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_2x32d_cifar10', '0274', 'd2ace03c413be7e42c839c84db8dd0ebb5d69512', 'v0.0.375', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_2x32d_cifar100', '1834', '0b30c4701a719995412882409339f3553a54c9d1', 'v0.0.375', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnext272_2x32d_svhn', '0244', '39b8a33612d335a0193b867b38c0b09d168de6c3', 'v0.0.375', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet20_cifar10', '0601', '935d89433e803c8a3027c81f1267401e7caccce6', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet20_cifar100', '2854', '8c7abf66d8c1418cb3ca760f5d1efbb42738036b', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet20_svhn', '0323', 'd77df31c62d1504209a5ba47e59ccb0ae84500b2', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet56_cifar10', '0413', 'b61c143989cb2901bec48dded4c6ddcae91aabc4', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet56_cifar100', '2294', '7fa54f4593f364c2363cb3ee8d5bc1285af1ade5', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet56_svhn', '0264', '93839c762a97bd0b5bd27c71fd64c227afdae3ed', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet110_cifar10', '0363', '1ddec2309ff61c2c0e14c96d51a1b846afdc2acc', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet110_cifar100', '2086', 'a82c30938028a172dd6a124152bc0952b55a2f49', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet110_svhn', '0235', '9572ba7394c774b8d056b24a7631ef47e53024b8', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet164bn_cifar10', '0339', '1085dab6467cb18e554123663816094f080fc626', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet164bn_cifar100', '1995', '97dd4ab630f6277cf7b07cbdcbf4ae8ddce4d401', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet164bn_svhn', '0245', 'af0a90a50fb3c91eef039178a681e69aae703f3a', 'v0.0.362', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet272bn_cifar10', '0339', '812db5187bab9aa5203611c1c174d0e51c81761c', 'v0.0.390', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet272bn_cifar100', '1907', '179e1c38ba714e1babf6c764ca735f256d4cd122', 'v0.0.390', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet272bn_svhn', '0238', '0e16badab35b483b1a1b0e7ea2a615de714f7424', 'v0.0.390', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet542bn_cifar10', '0347', 'd1542214765f1923f2fdce810aef5dc2e523ffd2', 'v0.0.385', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet542bn_cifar100', '1887', '9c4e7623dc06a56edabf04f4427286916843df85', 'v0.0.385', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('seresnet542bn_svhn', '0226', '71a8f2986cbc1146f9a41d1a08ecba52649b8efd', 'v0.0.385', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet20_cifar10', '0618', 'eabb3fce8373cbeb412ced9a79a1e2f9c6c3689c', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet20_cifar100', '2831', 'fe7558e0ae554d39d8761f234e8328262ee31efd', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet20_svhn', '0324', '061daa587dd483744d5b60d2fd3b2750130dd8a1', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet56_cifar10', '0451', 'fc23e153ccfaddd52de61d77570a0befeee1e687', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet56_cifar100', '2305', 'c4bdc5d7bbaa0d9f6e2ffdf2abe4808ad26d0f66', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet56_svhn', '0271', 'c91e922f1b3d0ea634db8e467e9ab4a6b8dc7722', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet110_cifar10', '0454', '418daea9d2253a3e9fbe4eb80eb4dcc6f29d5925', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet110_cifar100', '2261', 'ed7d3c3e51ed2ea9a827ed942e131c78784813b7', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet110_svhn', '0259', '556909fd942d3a42e424215374b340680b705424', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet164bn_cifar10', '0373', 'ff353a2910f85db66d8afca0a4150176bcdc7a69', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet164bn_cifar100', '2005', 'df1163c4d9de72c53efc37758773cc943be7f055', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet164bn_svhn', '0256', 'f8dd4e06596841f0c7f9979fb566b9e57611522f', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet272bn_cifar10', '0339', '606d096422394857cb1f45ecd7eed13508158a60', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet272bn_cifar100', '1913', 'cb71511346e441cbd36bacc93c821e8b6101456a', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet272bn_svhn', '0249', '904d74a2622d870f8a2384f9e50a84276218acc3', 'v0.0.379', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet542bn_cifar10', '0308', '652bc8846cfac7a2ec6625789531897339800202', 'v0.0.382', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet542bn_cifar100', '1945', '9180f8632657bb8f7b6583e47d04ce85defa956c', 'v0.0.382', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('sepreresnet542bn_svhn', '0247', '318a8325afbfbaa8a35d54cbd1fa7da668ef1389', 'v0.0.382', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a48_cifar10', '0372', 'eb185645cda89e0c3c47b11c4b2d14ff18fa0ae1', 'v0.0.184', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a48_cifar100', '2095', '95da1a209916b3cf4af7e8dc44374345a88c60f4', 'v0.0.186', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a48_svhn', '0247', 'd48bafbebaabe9a68e5924571752b3d7cd95d311', 'v0.0.281', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a84_cifar10', '0298', '7b835a3cf19794478d478aced63ca9e855c3ffeb', 'v0.0.185', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a84_cifar100', '1887', 'ff711084381f217f84646c676e4dcc90269dc516', 'v0.0.199', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a84_svhn', '0243', '971576c61cf30e02f13da616afc9848b2a609e0e', 'v0.0.392', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a270_cifar10', '0251', '31bdd9d51ec01388cbb2adfb9f822c942de3c4ff', 'v0.0.194', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a270_cifar100', '1710', '7417dd99069d6c8775454475968ae226b9d5ac83', 'v0.0.319', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet110_a270_svhn', '0238', '3047a9bb7c92a09adf31590e3fe6c9bcd36c7a67', 'v0.0.393', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet164_a270_bn_cifar10', '0242', 'daa2a402c1081323b8f2239f2201246953774e84', 'v0.0.264', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet164_a270_bn_cifar100', '1670', '54d99c834bee0ed7402ba46e749e48182ad1599a', 'v0.0.312', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet164_a270_bn_svhn', '0233', '42d4c03374f32645924fc091d599ef7b913e2d32', 'v0.0.396', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet200_a240_bn_cifar10', '0244', '44433afdd2bc32c55dfb1e8347bc44d1c2bf82c7', 'v0.0.268', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet200_a240_bn_cifar100', '1609', '087c02d6882e274054f44482060f193b9fc208bb', 'v0.0.317', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet200_a240_bn_svhn', '0232', 'f9660c25f1bcff9d361aeca8fb3efaccdc0546e7', 'v0.0.397', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet236_a220_bn_cifar10', '0247', 'daa91d74979c451ecdd8b59e4350382966f25831', 'v0.0.285', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet236_a220_bn_cifar100', '1634', 'a45816ebe1d6a67468b78b7a93334a41aca1c64b', 'v0.0.312', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet236_a220_bn_svhn', '0235', 'f74fe248b6189699174c90bc21e7949d3cca8130', 'v0.0.398', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet272_a200_bn_cifar10', '0239', '586b1ecdc8b34b69dcae4ba57f71c24583cca9b1', 'v0.0.284', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet272_a200_bn_cifar100', '1619', '98bc2f48da0f2c68bc5376c17b0aefc734a64881', 'v0.0.312', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('pyramidnet272_a200_bn_svhn', '0240', '96f6e740dcdc917d776f6df855e3437c93d0da4f', 'v0.0.404', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_cifar10', '0561', '8b8e819467a2e4c450e4ff72ced80582d0628b68', 'v0.0.193', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_cifar100', '2490', 'd182c224d6df2e289eef944d54fea9fd04890961', 'v0.0.195', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_svhn', '0305', 'ac0de84a1a905b768c66f0360f1fb9bd918833bf', 'v0.0.278', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_bc_cifar10', '0643', '6dc86a2ea1d088f088462f5cbac06cc0f37348c0', 'v0.0.231', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_bc_cifar100', '2841', '1e9db7651a21e807c363c9f366bd9e91ce2f296f', 'v0.0.232', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k12_bc_svhn', '0320', '320760528b009864c68ff6c5b874e9f351ea7a07', 'v0.0.279', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k24_bc_cifar10', '0452', '669c525548a4a2392c5e3c380936ad019f2be7f9', 'v0.0.220', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k24_bc_cifar100', '2267', '411719c0177abf58eddaddd05511c86db0c9d548', 'v0.0.221', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k24_bc_svhn', '0290', 'f4440d3b8c974c9e1014969f4d5832c6c90195d5', 'v0.0.280', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k36_bc_cifar10', '0404', 'b1a4cc7e67db1ed8c5583a59dc178cc7dc2c572e', 'v0.0.224', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k36_bc_cifar100', '2050', 'cde836fafec1e5d6c8ed69fd3cfe322e8e71ef1d', 'v0.0.225', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet40_k36_bc_svhn', '0260', '8c7db0a291a0797a8bc3c709bff7917bc41471cc', 'v0.0.311', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k12_cifar10', '0366', '26089c6e70236e8f25359de6fda67b84425888ab', 'v0.0.205', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k12_cifar100', '1964', '5e10cd830c06f6ab178e9dd876c83c754ca63f00', 'v0.0.206', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k12_svhn', '0260', '57fde50e9f44edc0486b62a1144565bc77d5bdfe', 'v0.0.311', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k24_cifar10', '0313', '397f0e39b517c05330221d4f3a9755eb5e561be1', 'v0.0.252', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k24_cifar100', '1808', '1c0a8067283952709d8e09c774c3a404f51e0079', 'v0.0.318', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k12_bc_cifar10', '0416', 'b9232829b13c3f3f2ea15f4be97f500b7912c3c2', 'v0.0.189', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet100_k12_bc_cifar100', '2119', '05a6f02772afda51a612f5b92aadf19ffb60eb72', 'v0.0.208', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet190_k40_bc_cifar10', '0252', '2896fa088aeaef36fcf395d404d97ff172d78943', 'v0.0.286', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet250_k24_bc_cifar10', '0267', 'f8f9d3052bae1fea7e33bb1ce143c38b4aa5622b', 'v0.0.290', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('densenet250_k24_bc_cifar100', '1739', '09ac3e7d9fbe6b97b170bd838dac20ec144b4e49', 'v0.0.303', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k24_bc_cifar10', '0531', 'b91a9dc35877c4285fe86f49953d1118f6b69e57', 'v0.0.226', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k24_bc_cifar100', '2396', '0ce8f78ab9c6a4786829f816ae0615c7905f292c', 'v0.0.227', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k24_bc_svhn', '0287', 'fd9b6def10f154378a76383cf023d7f2f5ae02ab', 'v0.0.306', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k36_bc_cifar10', '0437', 'ed264a2060836c7440f0ccde57315e1ec6263ff0', 'v0.0.233', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k36_bc_cifar100', '2165', '6f68f83dc31dea5237e6362e6c6cfeed48a8d9e3', 'v0.0.234', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('xdensenet40_2_k36_bc_svhn', '0274', '540a69f13a6ce70bfef13657e70dfa414d966581', 'v0.0.306', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn16_10_cifar10', '0293', 'ce810d8a17a2deb73eddb5bec8709f93278bc53e', 'v0.0.166', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn16_10_cifar100', '1895', 'bef9809c845deb1b2bb0c9aaaa7c58bd97740504', 'v0.0.204', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn16_10_svhn', '0278', '5ab2a4edd5398a03d2e28db1b075bf0313ae5828', 'v0.0.271', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn28_10_cifar10', '0239', 'fe97dcd6d0dd8dda8e9e38e6cfa320cffb9955ce', 'v0.0.166', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn28_10_cifar100', '1788', '8c3fe8185d3af9cc3813fe376cab895f6780ac18', 'v0.0.320', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn28_10_svhn', '0271', 'd62b6bbaef7228706a67c2c8416681f97c6d4688', 'v0.0.276', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn40_8_cifar10', '0237', '8dc84ec730f35c4b8968a022bc045c0665410840', 'v0.0.166', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn40_8_cifar100', '1803', '0d18bfbff85951d88a881dc6a15ad46f56ea8c28', 'v0.0.321', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn40_8_svhn', '0254', 'dee59602c10e5d56bd9c168e8e8400792b9a8b08', 'v0.0.277', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_1bit_cifar10', '0326', 'e6140f8a5eacd5227e8748457b5ee9f5f519d2d5', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_1bit_cifar100', '1904', '149860c829a812224dbf2086c8ce95c2eba322fe', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_1bit_svhn', '0273', 'ffe96cb78cd304d5207fff0cf08835ba2a71f666', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_32bit_cifar10', '0314', 'a18146e8b0f99a900c588eb8995547393c2d9d9e', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_32bit_cifar100', '1812', '70d8972c7455297bc21fdbe4fc040c2f6b3593a3', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('wrn20_10_32bit_svhn', '0259', 'ce402a58887cbae3a38da1e845a1c1479a6d7213', 'v0.0.302', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_56_cifar10', '0543', '44f0f47d2e1b609880ee1b623014c52a9276e2ea', 'v0.0.228', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_56_cifar100', '2549', '34be6719cd128cfe60ba93ac6d250ac4c1acf0a5', 'v0.0.229', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_56_svhn', '0269', '5a9ad66c8747151be1d2fb9bc854ae382039bdb9', 'v0.0.287', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_110_cifar10', '0435', 'fb2a2b0499e4a4d92bdc1d6792bd5572256d5165', 'v0.0.235', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_110_cifar100', '2364', 'd599e3a93cd960c8bfc5d05c721cd48fece5fa6f', 'v0.0.236', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_110_svhn', '0257', '155380add8d351d2c12026d886a918f1fc3f9fd0', 'v0.0.287', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_164_cifar10', '0393', 'de7b6dc60ad6a297bd55ab65b6d7b1225b0ef6d1', 'v0.0.294', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_164_cifar100', '2234', 'd37483fccc7fc1a25ff90ef05ecf1b8eab3cc1c4', 'v0.0.294', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('ror3_164_svhn', '0273', 'ff0d9af0d40ef204393ecc904b01a11aa63acc01', 'v0.0.294', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('rir_cifar10', '0328', '414c3e6088ae1e83aa1a77c43e38f940c18a0ce2', 'v0.0.292', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('rir_cifar100', '1923', 'de8ec24a232b94be88f4208153441f66098a681c', 'v0.0.292', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('rir_svhn', '0268', '12fcbd3bfc6b4165e9b23f3339a1b751b4b8f681', 'v0.0.292', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet20_2x16d_cifar10', '0515', 'ef71ec0d5ef928ef8654294114a013895abe3f9a', 'v0.0.215', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet20_2x16d_cifar100', '2922', '4d07f14234b1c796b3c1dfb24d4a3220a1b6b293', 'v0.0.247', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet20_2x16d_svhn', '0317', 'a693ec24fb8fe2c9f15bcc6b1050943c0c5d595a', 'v0.0.295', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet26_2x32d_cifar10', '0317', 'ecd1f8337cc90b5378b4217fb2591f2ed0f02bdf', 'v0.0.217', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet26_2x32d_cifar100', '1880', 'b47e371f60c9fed9eaac960568783fb6f83a362f', 'v0.0.222', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('shakeshakeresnet26_2x32d_svhn', '0262', 'c1b8099ece97e17ce85213e4ecc6e50a064050cf', 'v0.0.295', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet20_cifar10', '0622', '5e1a02bf2347d48651a5feabe97f7caf215bacc9', 'v0.0.340', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet20_cifar100', '2771', '28aa1a18d91334e274d3157114fc5c72e47c6c65', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet20_svhn', '0323', 'b8ee92c9d86de6a6adc80988518fe0544759ca4f', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet56_cifar10', '0505', '8ac8680448b2999bd1e03eed60373ea78eba9a44', 'v0.0.340', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet56_cifar100', '2435', '19085975afc7ee902a6d663eb371554c9519b467', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet56_svhn', '0268', 'bd2ec7558697aff1e0fd229d3e933a08c4c302e9', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet110_cifar10', '0410', '0c00a7daec69b57ab41d4a55e1026da33ecf4539', 'v0.0.340', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet110_cifar100', '2211', '7096ddb3a393ad28b27ece19263c203068a11b6d', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet110_svhn', '0247', '635e42cfac6ed67e15b8a5526c8232f768d11201', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet164bn_cifar10', '0350', 'd31f2ebce3acb419b07dc4d298018ffea2599fea', 'v0.0.340', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet164bn_cifar100', '1953', 'b1c474d27de3a291a45856a3e3d256b7fda90dd0', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diaresnet164bn_svhn', '0244', '0b8f67132b3911e6328733b666bf6a0fed133eeb', 'v0.0.342', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet20_cifar10', '0642', '14a1eb85c6346c81336b490cc49f2e6b809c193e', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet20_cifar100', '2837', 'f7675c09ca5f742376a102e3c8c5156aea4e24b9', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet20_svhn', '0303', 'dc3e3a453ffc8aff7d014bc15867db4ce2d8e1e9', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet56_cifar10', '0483', '41cae958be1bec3f839126cd167051de6a981d0a', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet56_cifar100', '2505', '5d357985236c021ab965101b94980cdc4722a70d', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet56_svhn', '0280', '537ebc66fe32f9bb6fb6bb8f9ac6402f8ec93e09', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet110_cifar10', '0425', '5638501600355b8b195179fb2be5d5989e93b0e0', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet110_cifar100', '2269', 'c993cc296c39bc9c8c0fc6115bfe6c7d720a0903', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet110_svhn', '0242', 'a156cfb58ffda89c0e87cd8aef82f56f79b40ea5', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet164bn_cifar10', '0356', '6ec898c89c66eb32b0e42b78a027af4920b24366', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet164bn_cifar100', '1999', '00872f989c33321f7938a40c0fd9f44669c4c483', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('diapreresnet164bn_svhn', '0256', '134048810bd2e12dc68035d4ecad6af525639db0', 'v0.0.343', '', '', 'cf', 0, 0.0, 0, ''), # noqa
    ('resnet10_cub', '2777', '4525b5932665698b3f4551dde99d22ce03878172', 'v0.0.335', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('resnet12_cub', '2727', 'c15248832d2fe88c58fb603df3925e09b3d797e7', 'v0.0.336', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('resnet14_cub', '2477', '5051bbc659c0303c1860114f1a32a18942de9099', 'v0.0.337', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('resnet16_cub', '2365', 'b831356c696db80fec8deb2381875f37bf60dd93', 'v0.0.338', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('resnet18_cub', '2333', '200d8b9c48baf073a4c2ea0cbba4d7f81288e684', 'v0.0.344', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('resnet26_cub', '2316', '599ab467f396e979028f2ae5d65330949c9ddc86', 'v0.0.345', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet10_cub', '2772', 'f52526ec21bbb534a6d51be42bdb5322fbda919b', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet12_cub', '2651', '5c0e7f835c65d1f2f85048d0169788377490b819', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet14_cub', '2416', 'a4cda9012ec2380fa74f3d74879f0d206fcaf5b5', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet16_cub', '2332', '43a819b7e226d65aa77a4c90fdb7c70eb5093505', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet18_cub', '2352', '414fa2775de28ce3a1a0bc142ab674fa3a6638e3', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('seresnet26_cub', '2299', '5aa0a7d1ef9c33f8dbf3ff1cb1a1a855627163f4', 'v0.0.361', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('mobilenet_w1_cub', '2377', '8428471f4ae08709b71ff2f69cf1a6fd286004c9', 'v0.0.346', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('proxylessnas_mobile_cub', '2266', 'e4b5098a17425c97740fc564460aa95d9eb2a41e', 'v0.0.347', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('ntsnet_cub', '1277', 'f6f330abfabcc2ea17a8d4b8977a6ea322ddf532', 'v0.0.334', '', '', 'cub', 0, 0.0, 0, ''), # noqa
    ('pspnet_resnetd101b_voc', '8144', 'c22f021948461a7b7ab1ef1265a7948762770c83', 'v0.0.297', '', '', 'voc', 0, 0.0, 0, ''), # noqa
    ('pspnet_resnetd50b_ade20k', '3687', '13f22137d7dd06c6de2ffc47e6ed33403d3dd2cf', 'v0.0.297', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('pspnet_resnetd101b_ade20k', '3797', '115d62bf66477221b83337208aefe0f2f0266da2', 'v0.0.297', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('pspnet_resnetd101b_cityscapes', '7172', '0a6efb497bd4fc763d27e2121211e06f72ada7ed', 'v0.0.297', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('pspnet_resnetd101b_coco', '6741', 'c8b13be65cb43402fce8bae945f6e0d0a3246b92', 'v0.0.297', '', '', 'cocoseg', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd101b_voc', '8024', 'fd8bf74ffc96c97b30bcd3b6ce194a2daed68098', 'v0.0.298', '', '', 'voc', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd152b_voc', '8120', 'f2dae198b3cdc41920ea04f674b665987c68d7dc', 'v0.0.298', '', '', 'voc', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd50b_ade20k', '3713', 'bddbb458e362e18f5812c2307b322840394314bc', 'v0.0.298', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd101b_ade20k', '3784', '977446a5fb32b33f168f2240fb6b7ef9f561fc1e', 'v0.0.298', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd101b_coco', '6773', 'e59c1d8f7ed5bcb83f927d2820580a2f4970e46f', 'v0.0.298', '', '', 'cocoseg', 0, 0.0, 0, ''), # noqa
    ('deeplabv3_resnetd152b_coco', '6899', '7e946d7a63ed255dd38afacebb0a0525e735da64', 'v0.0.298', '', '', 'cocoseg', 0, 0.0, 0, ''), # noqa
    ('fcn8sd_resnetd101b_voc', '8040', '66edc0b073f0dec66c18bb163c7d6de1ddbc32a3', 'v0.0.299', '', '', 'voc', 0, 0.0, 0, ''), # noqa
    ('fcn8sd_resnetd50b_ade20k', '3339', 'e1dad8a15c2a1be1138bd3ec51ba1b100bb8d9c9', 'v0.0.299', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('fcn8sd_resnetd101b_ade20k', '3588', '30d05ca42392a164ea7c93a9cbd7f33911d3c1af', 'v0.0.299', '', '', 'ade20k', 0, 0.0, 0, ''), # noqa
    ('fcn8sd_resnetd101b_coco', '6011', 'ebe2ad0bc1de5b4cecade61d17d269aa8bf6df7f', 'v0.0.299', '', '', 'coco', 0, 0.0, 0, ''), # noqa
    ('icnet_resnetd50b_cityscapes', '6402', 'b380f8cc91ffeac29df6c245f34fbc89aa095c53', 'v0.0.457', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('fastscnn_cityscapes', '6576', 'b9859a25c6940383248bf2f53e2a5f02c1727cc8', 'v0.0.474', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('sinet_cityscapes', '6172', '8ecd14141b85a682c2cc1c74e13077fee4746d87', 'v0.0.437', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('bisenet_resnet18_celebamaskhq', '0000', '98affefd74cc7f87314a96f148dbdbf4055bbfcb', 'v0.0.462', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('danet_resnetd50b_cityscapes', '6799', 'c5740c9fd471c141a584455efd2167858dd8cb94', 'v0.0.468', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('danet_resnetd101b_cityscapes', '6810', 'f1eeb724757bbcdc067de9cdfad6d463fb9fdb90', 'v0.0.468', '', '', 'cs', 0, 0.0, 0, ''), # noqa
    ('alphapose_fastseresnet101b_coco', '7415', 'b9e3f64a9fe44198b23e7278cc3a94fd94247e20', 'v0.0.454', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resnet18_coco', '6631', '7c3656b35607805bdb877e7134938fd4510b2c8c', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resnet50b_coco', '7102', '621d2545c8b39793a0fe3a48054684f8b982a978', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resnet101b_coco', '7244', '540c29ec1794535fe9ee319cdb5527ed3a6d3eb5', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resnet152b_coco', '7253', '3a358d7de566d51e90b9d3a1f44a1c9c948769ed', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resneta50b_coco', '7170', '2d973dc512d02f24d0de5a98008898c0a03a2c99', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resneta101b_coco', '7297', '08175610ce24a4e476b49030c1c1378d74158f70', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_resneta152b_coco', '7344', 'dacb65cfe1261e5f2013cde18f2d5753c6453568', 'v0.0.455', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_resnet18_coco', '6625', '1e27b206737a33678b67b638bba8a4d024ec2dc3', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_resnet50b_coco', '7110', '023f910cab8c0750bb24e6a14aecdeb42fcc5561', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_mobilenet_w1_coco', '6410', '0ca46de0f31cb3d700ce1310f2eba19a3308a3f0', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '94f86097959d1acca6605d0d6487fd2d0899dfeb', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', '5cedb749e09a30c779073fba0e71546ad8b022d5', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', '9515de071e264aa95514b9b85ab60a5da23f5f69', 'v0.0.456', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
    ('lwopenpose2d_mobilenet_cmupan_coco', '3999', 'a6b9c66bb43e7819464f1ce23c6e3433b726b95d', 'v0.0.458', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa
('lwopenpose3d_mobilenet_cmupan_coco', '3999', '4c727e1dece57dede247da2d7b97d647c0d51b0a', 'v0.0.458', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa ('ibppose_coco', '6487', '1958fe10a02a1c441e40d109d3281845488e1e2f', 'v0.0.459', '', '', 'cocohpe', 0, 0.0, 0, ''), # noqa ('jasperdr10x5_en', '2192', 'c2c00e2cc4a4302731e93c7cf9e59378a50668ab', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('jasperdr10x5_en_nr', '1792', '0417568d949907fcb9cf99de6646849fee1f2840', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet5x5_en_ls', '4469', '45bb0d815f16dcd1e754e90f82175b5366f75121', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_en', '1679', 'd59dfb8a63e6661a43ded110c059d587dfa77eee', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_en_nr', '1776', 'dfc92f272f3d7f3a0f040b52418605016a68250e', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_de', '1167', 'fb6c1f372bb80014cc7b9f04d7d615229b36084c', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_fr', '1388', '18af35d6317462f2afdd3da5fc636f052459f211', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_it', '1502', '04cac1876b9bfc82f1bc98b3c41ed664434168d5', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_es', '1295', '0e3f57f74b7b21bdc568620a1edeea6338a5691a', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_ca', '0842', '05b4e456a3035a095cbc2212a9982ea12850dacb', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_pl', '1359', 'a57dfee49831403bb01b8624fac39f7403365ee3', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_ru', '1648', 'deaa15ba85f5c1447076c744de2231fbc7eb94e8', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ('quartznet15x5_ru34', '0969', '977a01574b0c741435bfe76c3bcc6c58e22f816f', 'v0.0.555', '', '', 'mcv', 0, 0.0, 0, ''), # noqa ]} imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob' def get_model_name_suffix_data(model_name): if model_name not in _model_sha1: raise ValueError("Pretrained model for {name} is not available.".format(name=model_name)) error, sha1_hash, repo_release_tag, _, _, _, _, _, _, _ = _model_sha1[model_name] return error, sha1_hash, repo_release_tag def get_model_file(model_name, local_model_store_dir_path=os.path.join("~", ".torch", "models")): """ Return location for the pretrained model on the local file system. This function will download the file from the online model zoo if it cannot be found locally or its hash does not match. The root directory will be created if it doesn't exist. Parameters: ---------- model_name : str Name of the model. local_model_store_dir_path : str, default $TORCH_HOME/models Location for keeping the model parameters. Returns: ------- file_path : str Path to the requested pretrained model file. """ error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name) short_sha1 = sha1_hash[:8] file_name = "{name}-{error}-{short_sha1}.pth".format( name=model_name, error=error, short_sha1=short_sha1) local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path) file_path = os.path.join(local_model_store_dir_path, file_name) if os.path.exists(file_path): if _check_sha1(file_path, sha1_hash): return file_path else: logging.warning("Mismatch in the content of model file detected. Downloading again.") else: logging.info("Model file not found. 
Downloading to {}.".format(file_path)) if not os.path.exists(local_model_store_dir_path): os.makedirs(local_model_store_dir_path) zip_file_path = file_path + ".zip" _download( url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format( repo_url=imgclsmob_repo_url, repo_release_tag=repo_release_tag, file_name=file_name), path=zip_file_path, overwrite=True) with zipfile.ZipFile(zip_file_path) as zf: zf.extractall(local_model_store_dir_path) os.remove(zip_file_path) if _check_sha1(file_path, sha1_hash): return file_path else: raise ValueError("Downloaded file has different hash. Please try again.") def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True): """ Download a given URL. Parameters: ---------- url : str URL to download. path : str, optional Destination path to store the downloaded file. By default stores to the current directory with the same name as in the url. overwrite : bool, optional Whether to overwrite the destination file if it already exists. sha1_hash : str, optional Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified but doesn't match. retries : int, default 5 The number of times to attempt the download in case of failure or non-200 return codes. verify_ssl : bool, default True Verify SSL certificates. Returns: ------- str The file path of the downloaded file. """ import warnings try: import requests except ImportError: class requests_failed_to_import(object): pass requests = requests_failed_to_import if path is None: fname = url.split("/")[-1] # Empty filenames are invalid assert fname, "Can't construct file-name from this URL. " \ "Please set the `path` option manually." else: path = os.path.expanduser(path) if os.path.isdir(path): fname = os.path.join(path, url.split('/')[-1]) else: fname = path assert retries >= 0, "Number of retries should be at least 0" if not verify_ssl: warnings.warn( "Unverified HTTPS request is being made (verify_ssl=False). " "Adding certificate verification is strongly advised.") if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)): dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) if not os.path.exists(dirname): os.makedirs(dirname) while retries + 1 > 0: # Disable pylint too broad Exception # pylint: disable=W0703 try: print("Downloading {} from {}...".format(fname, url)) r = requests.get(url, stream=True, verify=verify_ssl) if r.status_code != 200: raise RuntimeError("Failed downloading url {}".format(url)) with open(fname, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) if sha1_hash and not _check_sha1(fname, sha1_hash): raise UserWarning("File {} is downloaded but the content hash does not match." " The repo may be outdated or download may be incomplete. " "If the `repo_url` is overridden, consider switching to " "the default repo.".format(fname)) break except Exception as e: retries -= 1 if retries <= 0: raise e else: print("download failed, retrying, {} attempt{} left" .format(retries, "s" if retries > 1 else "")) return fname def _check_sha1(file_name, sha1_hash): """ Check whether the sha1 hash of the file content matches the expected hash. Parameters: ---------- file_name : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns: ------- bool Whether the file content matches the expected hash. 
""" sha1 = hashlib.sha1() with open(file_name, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash def load_model(net, file_path, ignore_extra=True): """ Load model state dictionary from a file. Parameters: ---------- net : Module Network in which weights are loaded. file_path : str Path to the file. ignore_extra : bool, default True Whether to silently ignore parameters from the file that are not present in this Module. """ import torch if ignore_extra: pretrained_state = torch.load(file_path) model_dict = net.state_dict() pretrained_state = {k: v for k, v in pretrained_state.items() if k in model_dict} net.load_state_dict(pretrained_state) else: net.load_state_dict(torch.load(file_path)) def download_model(net, model_name, local_model_store_dir_path=os.path.join("~", ".torch", "models"), ignore_extra=True): """ Load model state dictionary from a file with downloading it if necessary. Parameters: ---------- net : Module Network in which weights are loaded. model_name : str Name of the model. local_model_store_dir_path : str, default $TORCH_HOME/models Location for keeping the model parameters. ignore_extra : bool, default True Whether to silently ignore parameters from the file that are not present in this Module. """ load_model( net=net, file_path=get_model_file( model_name=model_name, local_model_store_dir_path=local_model_store_dir_path), ignore_extra=ignore_extra) def calc_num_params(net): """ Calculate the count of trainable parameters for a model. Parameters: ---------- net : Module Analyzed model. """ import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count
94,745
110.465882
205
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/tresnet.py
""" TResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'TResNet: High Performance GPU-Dedicated Architecture,' https://arxiv.org/abs/2003.13630. NB: Not tested! """ __all__ = ['TResNet', 'tresnet_m', 'tresnet_l', 'tresnet_xl'] import os import torch import torch.nn as nn import torch.nn.functional as F from .common import conv1x1_block, conv3x3_block, SEBlock def anti_aliased_downsample(x): """ Anti-Aliased Downsample operation. Parameters: ---------- x : Tensor Input tensor. Returns: ------- Tensor Resulted tensor. """ channels = x.shape[1] weight = torch.tensor([1., 2., 1.], dtype=x.dtype, device=x.device) weight = weight[:, None] * weight[None, :] weight = weight / torch.sum(weight) weight = weight[None, None, :, :].repeat((channels, 1, 1, 1)) x_pad = F.pad(x, pad=(1, 1, 1, 1), mode="reflect") x = F.conv2d(x_pad, weight, stride=2, padding=0, groups=channels) return x class TResBlock(nn.Module): """ Simple TResNet block for residual path in TResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. activation : str Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, activation): super(TResBlock, self).__init__() self.resize = (stride > 1) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, activation=activation) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, activation=activation) self.se = SEBlock( channels=out_channels, mid_channels=max(out_channels // 4, 64)) def __call__(self, x): x = self.conv1(x) if self.resize: x = anti_aliased_downsample(x) x = self.conv2(x) x = self.se(x) return x class TResBottleneck(nn.Module): """ TResNet bottleneck block for residual path in TResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. use_se : bool Whether to use SE-module. activation : str Activation function or name of activation function. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, use_se, activation, bottleneck_factor=4): super(TResBottleneck, self).__init__() self.use_se = use_se self.resize = (stride > 1) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, activation=activation) if self.resize: self.pool = nn.AvgPool2d( kernel_size=3, stride=stride, padding=1) if self.use_se: self.se = SEBlock( channels=mid_channels, mid_channels=max(mid_channels * bottleneck_factor // 8, 64)) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=activation) def __call__(self, x): x = self.conv1(x) x = self.conv2(x) if self.resize: x = self.pool(x) if self.use_se: x = self.se(x) x = self.conv3(x) return x class ResADownBlock(nn.Module): """ TResNet downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. 
""" def __init__(self, in_channels, out_channels, stride): super(ResADownBlock, self).__init__() assert (stride > 1) self.pool = nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def __call__(self, x): x = self.pool(x) x = self.conv(x) return x class TResUnit(nn.Module): """ TResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. use_se : bool Whether to use SE-module. activation : str Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, use_se, activation, bottleneck=True): super(TResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = TResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, use_se=use_se, activation=activation) else: self.body = TResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=activation) if self.resize_identity: self.identity_block = ResADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = nn.ReLU(inplace=True) def __call__(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x def space_to_depth(x): """ Space-to-Depth operation. Parameters: ---------- x : Tensor Input tensor. Returns: ------- Tensor Resulted tensor. """ k = 4 batch, channels, height, width = x.size() new_height = height // k new_width = width // k new_channels = channels * k * k x = x.view(batch, channels, new_height, k, new_width, k) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(batch, new_channels, new_height, new_width) return x class TResInitBlock(nn.Module): """ TResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : str Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, activation): super(TResInitBlock, self).__init__() mid_channels = in_channels * 16 self.conv = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, activation=activation) def __call__(self, x): x = space_to_depth(x) x = anti_aliased_downsample(x) x = self.conv(x) return x class TResNet(nn.Module): """ TResNet model from 'TResNet: High Performance GPU-Dedicated Architecture,' https://arxiv.org/abs/2003.13630. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : list of bool Whether to use a bottleneck or simple block in units for each stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), num_classes=1000): super(TResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes activation = (lambda: nn.LeakyReLU(negative_slope=0.01, inplace=True)) self.features = nn.Sequential() self.features.add_module("init_block", TResInitBlock( in_channels=in_channels, out_channels=init_block_channels, activation=activation)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 use_se = not (i == len(channels) - 1) stage.add_module("unit{}".format(j + 1), TResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_se=use_se, bottleneck=bottleneck[i], activation=activation)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Sequential() self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_tresnet(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create TResNet model with specific parameters. Parameters: ---------- version : str Version of TResNet ('m', 'l' or 'xl'). bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "m": layers = [3, 4, 11, 3] width_scale = 1.0 elif version == "l": layers = [4, 5, 18, 3] width_scale = 1.2 elif version == "xl": layers = [4, 5, 24, 3] width_scale = 1.3 else: raise ValueError("Unsupported TResNet version {}".format(version)) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if width_scale != 1.0: init_block_channels = int(init_block_channels * width_scale) channels_per_layers = [init_block_channels * (2 ** i) for i in range(len(channels_per_layers))] bottleneck = [False, False, True, True] bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor if bi else ci for (ci, bi) in zip(channels_per_layers, bottleneck)] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = TResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def tresnet_m(**kwargs): """ TResNet-M model from 'TResNet: High Performance GPU-Dedicated Architecture,' https://arxiv.org/abs/2003.13630. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_tresnet(version="m", model_name="tresnet_m", **kwargs) def tresnet_l(**kwargs): """ TResNet-L model from 'TResNet: High Performance GPU-Dedicated Architecture,' https://arxiv.org/abs/2003.13630. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_tresnet(version="l", model_name="tresnet_l", **kwargs) def tresnet_xl(**kwargs): """ TResNet-XL model from 'TResNet: High Performance GPU-Dedicated Architecture,' https://arxiv.org/abs/2003.13630. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_tresnet(version="xl", model_name="tresnet_xl", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (tresnet_m, 224), (tresnet_l, 224), (tresnet_xl, 224), ] for model, size in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != tresnet_m or weight_count == 31389032) assert (model != tresnet_l or weight_count == 55989256) assert (model != tresnet_xl or weight_count == 78436244) batch = 1 x = torch.randn(batch, 3, size, size) y = net(x) y.sum().backward() assert (tuple(y.size()) == (batch, 1000)) if __name__ == "__main__": _test()
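Since the module is flagged as untested, a quick shape sanity check is useful. The sizes below follow directly from space_to_depth (k = 4: channels x16, spatial /4) and the stride-2 [1, 2, 1] binomial blur in anti_aliased_downsample with reflect padding, which is exactly the path TResInitBlock takes before its convolution. A minimal sketch, assuming the two module-level functions above are importable:

import torch

x = torch.randn(1, 3, 224, 224)
y = space_to_depth(x)           # 4x4 block rearrangement: (1, 3*16, 224/4, 224/4)
assert tuple(y.shape) == (1, 48, 56, 56)
z = anti_aliased_downsample(y)  # per-channel 3x3 blur, stride 2: (56+2-3)//2 + 1 = 28
assert tuple(z.shape) == (1, 48, 28, 28)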
15,627
28.542533
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fastseresnet.py
""" Fast-SE-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['FastSEResNet', 'fastseresnet101b'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, SEBlock from .resnet import ResBlock, ResBottleneck, ResInitBlock class FastSEResUnit(nn.Module): """ Fast-SE-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, use_se): super(FastSEResUnit, self).__init__() self.use_se = use_se self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.use_se: self.se = SEBlock( channels=out_channels, reduction=1, use_conv=False) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if self.use_se: x = self.se(x) x = x + identity x = self.activ(x) return x class FastSEResNet(nn.Module): """ Fast-SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(FastSEResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 use_se = (j == 0) stage.add_module("unit{}".format(j + 1), FastSEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, use_se=use_se)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_fastseresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Fast-SE-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported Fast-SE-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = FastSEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fastseresnet101b(**kwargs): """ Fast-SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fastseresnet(blocks=101, conv1_stride=False, model_name="fastseresnet101b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ fastseresnet101b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fastseresnet101b or weight_count == 55697960) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,345
30.049834
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ibnbresnet.py
""" IBN(b)-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. """ __all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block from .resnet import ResBottleneck class IBNbConvBlock(nn.Module): """ IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, activate=True): super(IBNbConvBlock, self).__init__() self.activate = activate self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.inst_norm = nn.InstanceNorm2d( num_features=out_channels, affine=True) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.inst_norm(x) if self.activate: x = self.activ(x) return x def ibnb_conv7x7_block(in_channels, out_channels, stride=1, padding=3, bias=False, activate=True): """ 7x7 version of the IBN(b)-ResNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 3 Padding value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. activate : bool, default True Whether activate the convolution block. """ return IBNbConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=stride, padding=padding, bias=bias, activate=activate) class IBNbResUnit(nn.Module): """ IBN(b)-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. use_inst_norm : bool Whether to use instance normalization. 
""" def __init__(self, in_channels, out_channels, stride, use_inst_norm): super(IBNbResUnit, self).__init__() self.use_inst_norm = use_inst_norm self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=False) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) if self.use_inst_norm: self.inst_norm = nn.InstanceNorm2d( num_features=out_channels, affine=True) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity if self.use_inst_norm: x = self.inst_norm(x) x = self.activ(x) return x class IBNbResInitBlock(nn.Module): """ IBN(b)-ResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(IBNbResInitBlock, self).__init__() self.conv = ibnb_conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class IBNbResNet(nn.Module): """ IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(IBNbResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", IBNbResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1) stage.add_module("unit{}".format(j + 1), IBNbResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_inst_norm=use_inst_norm)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_ibnbresnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IBN(b)-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported IBN(b)-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IBNbResNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ibnb_resnet50(**kwargs): """ IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnbresnet(blocks=50, model_name="ibnb_resnet50", **kwargs) def ibnb_resnet101(**kwargs): """ IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnbresnet(blocks=101, model_name="ibnb_resnet101", **kwargs) def ibnb_resnet152(**kwargs): """ IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnbresnet(blocks=152, model_name="ibnb_resnet152", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ ibnb_resnet50, ibnb_resnet101, ibnb_resnet152, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibnb_resnet50 or weight_count == 25558568) assert (model != ibnb_resnet101 or weight_count == 44550696) assert (model != ibnb_resnet152 or weight_count == 60194344) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
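The use_inst_norm rule in IBNbResNet.__init__ places the extra InstanceNorm only after the residual sum of the last unit of the first two stages. A small sketch enumerating the affected units for the 50-block configuration (plain Python, illustrative only):

layers = [3, 4, 6, 3]  # stage depths for ibnb_resnet50
for i, depth in enumerate(layers):
    for j in range(depth):
        if (i < 2) and (j == depth - 1):  # same predicate as in IBNbResNet.__init__
            print("stage{} unit{} gets InstanceNorm".format(i + 1, j + 1))
# -> stage1 unit3 and stage2 unit4; stages 3 and 4 use plain residual units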
11,999
29
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/polynet.py
""" PolyNet for ImageNet-1K, implemented in PyTorch. Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. """ __all__ = ['PolyNet', 'polynet'] import os import torch.nn as nn import torch.nn.init as init from .common import ConvBlock, conv1x1_block, conv3x3_block, Concurrent, ParametricSequential, ParametricConcurrent class PolyConv(nn.Module): """ PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules. The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception blocks inside a poly-N module. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. num_blocks : int Number of blocks (BatchNorm layers). """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, num_blocks): super(PolyConv, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) self.bns = nn.ModuleList() for i in range(num_blocks): self.bns.append(nn.BatchNorm2d(num_features=out_channels)) self.activ = nn.ReLU(inplace=True) def forward(self, x, index): x = self.conv(x) x = self.bns[index](x) x = self.activ(x) return x def poly_conv1x1(in_channels, out_channels, num_blocks): """ 1x1 version of the PolyNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). """ return PolyConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, num_blocks=num_blocks) class MaxPoolBranch(nn.Module): """ PolyNet specific max pooling branch block. """ def __init__(self): super(MaxPoolBranch, self).__init__() self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) def forward(self, x): x = self.pool(x) return x class Conv1x1Branch(nn.Module): """ PolyNet specific convolutional 1x1 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Conv1x1Branch, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) def forward(self, x): x = self.conv(x) return x class Conv3x3Branch(nn.Module): """ PolyNet specific convolutional 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Conv3x3Branch, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, padding=0) def forward(self, x): x = self.conv(x) return x class ConvSeqBranch(nn.Module): """ PolyNet specific convolutional sequence branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. 
strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. """ def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list): super(ConvSeqBranch, self).__init__() assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = nn.Sequential() for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=strides, padding=padding)) in_channels = out_channels def forward(self, x): x = self.conv_list(x) return x class PolyConvSeqBranch(nn.Module): """ PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. num_blocks : int Number of blocks for PolyConv. """ def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, num_blocks): super(PolyConvSeqBranch, self).__init__() assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = ParametricSequential() for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add_module("conv{}".format(i + 1), PolyConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=strides, padding=padding, num_blocks=num_blocks)) in_channels = out_channels def forward(self, x, index): x = self.conv_list(x, index=index) return x class TwoWayABlock(nn.Module): """ PolyNet type Inception-A block. """ def __init__(self): super(TwoWayABlock, self).__init__() in_channels = 384 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 48, 64), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1))) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 32), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1))) self.branches.add_module("branch3", Conv1x1Branch( in_channels=in_channels, out_channels=32)) self.conv = conv1x1_block( in_channels=128, out_channels=in_channels, activation=None) def forward(self, x): x = self.branches(x) x = self.conv(x) return x class TwoWayBBlock(nn.Module): """ PolyNet type Inception-B block. 
""" def __init__(self): super(TwoWayBBlock, self).__init__() in_channels = 1152 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)))) self.branches.add_module("branch2", Conv1x1Branch( in_channels=in_channels, out_channels=192)) self.conv = conv1x1_block( in_channels=384, out_channels=in_channels, activation=None) def forward(self, x): x = self.branches(x) x = self.conv(x) return x class TwoWayCBlock(nn.Module): """ PolyNet type Inception-C block. """ def __init__(self): super(TwoWayCBlock, self).__init__() in_channels = 2048 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)))) self.branches.add_module("branch2", Conv1x1Branch( in_channels=in_channels, out_channels=192)) self.conv = conv1x1_block( in_channels=448, out_channels=in_channels, activation=None) def forward(self, x): x = self.branches(x) x = self.conv(x) return x class PolyPreBBlock(nn.Module): """ PolyNet type PolyResidual-Pre-B block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). """ def __init__(self, num_blocks): super(PolyPreBBlock, self).__init__() in_channels = 1152 self.branches = ParametricConcurrent() self.branches.add_module("branch1", PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), num_blocks=num_blocks)) self.branches.add_module("branch2", poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks)) def forward(self, x, index): x = self.branches(x, index=index) return x class PolyPreCBlock(nn.Module): """ PolyNet type PolyResidual-Pre-C block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). """ def __init__(self, num_blocks): super(PolyPreCBlock, self).__init__() in_channels = 2048 self.branches = ParametricConcurrent() self.branches.add_module("branch1", PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)), num_blocks=num_blocks)) self.branches.add_module("branch2", poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks)) def forward(self, x, index): x = self.branches(x, index=index) return x def poly_res_b_block(): """ PolyNet type PolyResidual-Res-B block. """ return conv1x1_block( in_channels=384, out_channels=1152, stride=1, activation=None) def poly_res_c_block(): """ PolyNet type PolyResidual-Res-C block. """ return conv1x1_block( in_channels=448, out_channels=2048, stride=1, activation=None) class MultiResidual(nn.Module): """ Base class for constructing N-way modules (2-way, 3-way, and so on). Actually it is for 2-way modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. res_block : Module class Residual branch block. num_blocks : int Number of residual branches. 
""" def __init__(self, scale, res_block, num_blocks): super(MultiResidual, self).__init__() assert (num_blocks >= 1) self.scale = scale self.res_blocks = nn.ModuleList([res_block() for _ in range(num_blocks)]) self.activ = nn.ReLU(inplace=False) def forward(self, x): out = x for res_block in self.res_blocks: out = out + self.scale * res_block(x) out = self.activ(out) return out class PolyResidual(nn.Module): """ The other base class for constructing N-way poly-modules. Actually it is for 3-way poly-modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. res_block : Module class Residual branch block. num_blocks : int Number of residual branches. pre_block : Module class Preliminary block. """ def __init__(self, scale, res_block, num_blocks, pre_block): super(PolyResidual, self).__init__() assert (num_blocks >= 1) self.scale = scale self.pre_block = pre_block(num_blocks=num_blocks) self.res_blocks = nn.ModuleList([res_block() for _ in range(num_blocks)]) self.activ = nn.ReLU(inplace=False) def forward(self, x): out = x for index, res_block in enumerate(self.res_blocks): x = self.pre_block(x, index) x = res_block(x) out = out + self.scale * x x = self.activ(x) out = self.activ(out) return out class PolyBaseUnit(nn.Module): """ PolyNet unit base class. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. two_way_block : Module class Residual branch block for 2-way-stage. poly_scale : float, default 0.0 Scale value for 2-way stage. poly_res_block : Module class, default None Residual branch block for poly-stage. poly_pre_block : Module class, default None Preliminary branch block for poly-stage. """ def __init__(self, two_way_scale, two_way_block, poly_scale=0.0, poly_res_block=None, poly_pre_block=None): super(PolyBaseUnit, self).__init__() if poly_res_block is not None: assert (poly_scale != 0.0) assert (poly_pre_block is not None) self.poly = PolyResidual( scale=poly_scale, res_block=poly_res_block, num_blocks=3, pre_block=poly_pre_block) else: assert (poly_scale == 0.0) assert (poly_pre_block is None) self.poly = None self.twoway = MultiResidual( scale=two_way_scale, res_block=two_way_block, num_blocks=2) def forward(self, x): if self.poly is not None: x = self.poly(x) x = self.twoway(x) return x class PolyAUnit(PolyBaseUnit): """ PolyNet type A unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale=0.0): super(PolyAUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayABlock) assert (poly_scale == 0.0) class PolyBUnit(PolyBaseUnit): """ PolyNet type B unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale): super(PolyBUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayBBlock, poly_scale=poly_scale, poly_res_block=poly_res_b_block, poly_pre_block=PolyPreBBlock) class PolyCUnit(PolyBaseUnit): """ PolyNet type C unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. """ def __init__(self, two_way_scale, poly_scale): super(PolyCUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayCBlock, poly_scale=poly_scale, poly_res_block=poly_res_c_block, poly_pre_block=PolyPreCBlock) class ReductionAUnit(nn.Module): """ PolyNet type Reduction-A unit. 
""" def __init__(self): super(ReductionAUnit, self).__init__() in_channels = 384 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 384), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0))) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,))) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class ReductionBUnit(nn.Module): """ PolyNet type Reduction-B unit. """ def __init__(self): super(ReductionBUnit, self).__init__() in_channels = 1152 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 256), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0))) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0))) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 384), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0))) self.branches.add_module("branch4", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class PolyBlock3a(nn.Module): """ PolyNet type Mixed-3a block. """ def __init__(self): super(PolyBlock3a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", MaxPoolBranch()) self.branches.add_module("branch2", Conv3x3Branch( in_channels=64, out_channels=96)) def forward(self, x): x = self.branches(x) return x class PolyBlock4a(nn.Module): """ PolyNet type Mixed-4a block. """ def __init__(self): super(PolyBlock4a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=160, out_channels_list=(64, 96), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 0))) self.branches.add_module("branch2", ConvSeqBranch( in_channels=160, out_channels_list=(64, 64, 64, 96), kernel_size_list=(1, (7, 1), (1, 7), 3), strides_list=(1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), 0))) def forward(self, x): x = self.branches(x) return x class PolyBlock5a(nn.Module): """ PolyNet type Mixed-5a block. """ def __init__(self): super(PolyBlock5a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", MaxPoolBranch()) self.branches.add_module("branch2", Conv3x3Branch( in_channels=192, out_channels=192)) def forward(self, x): x = self.branches(x) return x class PolyInitBlock(nn.Module): """ PolyNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PolyInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, padding=0) self.conv3 = conv3x3_block( in_channels=32, out_channels=64) self.block1 = PolyBlock3a() self.block2 = PolyBlock4a() self.block3 = PolyBlock5a() def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.block1(x) x = self.block2(x) x = self.block3(x) return x class PolyNet(nn.Module): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. 
Parameters: ---------- two_way_scales : list of list of floats Two way scale values for each normal unit. poly_scales : list of list of floats Three way scale values for each normal unit. dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (331, 331) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, two_way_scales, poly_scales, dropout_rate=0.2, in_channels=3, in_size=(331, 331), num_classes=1000): super(PolyNet, self).__init__() self.in_size = in_size self.num_classes = num_classes normal_units = [PolyAUnit, PolyBUnit, PolyCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = nn.Sequential() self.features.add_module("init_block", PolyInitBlock( in_channels=in_channels)) for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)): stage = nn.Sequential() for j, (two_way_scale, poly_scale) in enumerate(zip(two_way_scales_per_stage, poly_scales_per_stage)): if (j == 0) and (i != 0): unit = reduction_units[i - 1] stage.add_module("unit{}".format(j + 1), unit()) else: unit = normal_units[i] stage.add_module("unit{}".format(j + 1), unit( two_way_scale=two_way_scale, poly_scale=poly_scale)) self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=9, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=2048, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_polynet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PolyNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" two_way_scales = [ [1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769], [0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923], [0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]] poly_scales = [ [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000], [0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615], [0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]] net = PolyNet( two_way_scales=two_way_scales, poly_scales=poly_scales, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def polynet(**kwargs): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_polynet(model_name="polynet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ polynet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != polynet or weight_count == 95366600) x = torch.randn(1, 3, 331, 331) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
28,281
28.928042
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnet_cifar.py
""" ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn', 'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn', 'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn', 'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn', 'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn', 'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn', 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn', 'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .resnet import ResUnit class CIFARResNet(nn.Module): """ ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnet20_cifar10(num_classes=10, **kwargs): """ ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="resnet20_cifar10", **kwargs) def resnet20_cifar100(num_classes=100, **kwargs): """ ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="resnet20_cifar100", **kwargs) def resnet20_svhn(num_classes=10, **kwargs): """ ResNet-20 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="resnet20_svhn", **kwargs) def resnet56_cifar10(num_classes=10, **kwargs): """ ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="resnet56_cifar10", **kwargs) def resnet56_cifar100(num_classes=100, **kwargs): """ ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="resnet56_cifar100", **kwargs) def resnet56_svhn(num_classes=10, **kwargs): """ ResNet-56 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="resnet56_svhn", **kwargs) def resnet110_cifar10(num_classes=10, **kwargs): """ ResNet-110 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="resnet110_cifar10", **kwargs) def resnet110_cifar100(num_classes=100, **kwargs): """ ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="resnet110_cifar100", **kwargs) def resnet110_svhn(num_classes=10, **kwargs): """ ResNet-110 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="resnet110_svhn", **kwargs) def resnet164bn_cifar10(num_classes=10, **kwargs): """ ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar10", **kwargs) def resnet164bn_cifar100(num_classes=100, **kwargs): """ ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar100", **kwargs) def resnet164bn_svhn(num_classes=10, **kwargs): """ ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="resnet164bn_svhn", **kwargs) def resnet272bn_cifar10(num_classes=10, **kwargs): """ ResNet-272(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar10", **kwargs) def resnet272bn_cifar100(num_classes=100, **kwargs): """ ResNet-272(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar100", **kwargs) def resnet272bn_svhn(num_classes=10, **kwargs): """ ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="resnet272bn_svhn", **kwargs) def resnet542bn_cifar10(num_classes=10, **kwargs): """ ResNet-542(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar10", **kwargs) def resnet542bn_cifar100(num_classes=100, **kwargs): """ ResNet-542(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar100", **kwargs) def resnet542bn_svhn(num_classes=10, **kwargs): """ ResNet-542(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs) def resnet1001_cifar10(num_classes=10, **kwargs): """ ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar10", **kwargs) def resnet1001_cifar100(num_classes=100, **kwargs): """ ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar100", **kwargs) def resnet1001_svhn(num_classes=10, **kwargs): """ ResNet-1001 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="resnet1001_svhn", **kwargs) def resnet1202_cifar10(num_classes=10, **kwargs): """ ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar10", **kwargs) def resnet1202_cifar100(num_classes=100, **kwargs): """ ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar100", **kwargs) def resnet1202_svhn(num_classes=10, **kwargs): """ ResNet-1202 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="resnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (resnet20_cifar10, 10), (resnet20_cifar100, 100), (resnet20_svhn, 10), (resnet56_cifar10, 10), (resnet56_cifar100, 100), (resnet56_svhn, 10), (resnet110_cifar10, 10), (resnet110_cifar100, 100), (resnet110_svhn, 10), (resnet164bn_cifar10, 10), (resnet164bn_cifar100, 100), (resnet164bn_svhn, 10), (resnet272bn_cifar10, 10), (resnet272bn_cifar100, 100), (resnet272bn_svhn, 10), (resnet542bn_cifar10, 10), (resnet542bn_cifar100, 100), (resnet542bn_svhn, 10), (resnet1001_cifar10, 10), (resnet1001_cifar100, 100), (resnet1001_svhn, 10), (resnet1202_cifar10, 10), (resnet1202_cifar100, 100), (resnet1202_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet20_cifar10 or weight_count == 272474) assert (model != resnet20_cifar100 or weight_count == 278324) assert (model != resnet20_svhn or weight_count == 272474) assert (model != resnet56_cifar10 or weight_count == 855770) assert (model != resnet56_cifar100 or weight_count == 861620) assert (model != resnet56_svhn or weight_count == 855770) assert (model != resnet110_cifar10 or weight_count == 1730714) assert (model != resnet110_cifar100 or weight_count == 1736564) assert (model != resnet110_svhn or weight_count == 1730714) assert (model != resnet164bn_cifar10 or weight_count == 1704154) assert (model != resnet164bn_cifar100 or weight_count == 1727284) assert (model != resnet164bn_svhn or weight_count == 1704154) assert (model != resnet272bn_cifar10 or weight_count == 2816986) assert (model != resnet272bn_cifar100 or weight_count == 2840116) assert (model != resnet272bn_svhn or weight_count == 2816986) assert (model != resnet542bn_cifar10 or weight_count == 5599066) assert (model != resnet542bn_cifar100 or weight_count == 5622196) assert (model != resnet542bn_svhn or weight_count == 5599066) assert (model != resnet1001_cifar10 or weight_count == 10328602) assert (model != resnet1001_cifar100 or weight_count == 10351732) assert (model != resnet1001_svhn or weight_count == 10328602) assert (model != resnet1202_cifar10 or weight_count == 19424026) assert (model != resnet1202_cifar100 or weight_count == 19429876) assert (model != resnet1202_svhn or weight_count == 19424026) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
23,882
35.131619
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/nasnet.py
""" NASNet-A for ImageNet-1K, implemented in PyTorch. Original paper: 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. """ __all__ = ['NASNet', 'nasnet_4a1056', 'nasnet_6a4032', 'nasnet_dual_path_sequential'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, DualPathSequential class NasDualPathScheme(object): """ NASNet specific scheme of dual path response for a module in a DualPathSequential module. Parameters: ---------- can_skip_input : bool Whether can skip input for some modules. """ def __init__(self, can_skip_input): super(NasDualPathScheme, self).__init__() self.can_skip_input = can_skip_input """ Scheme function. Parameters: ---------- module : nn.Module A module. x : Tensor Current processed tensor. x_prev : Tensor Previous processed tensor. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ def __call__(self, module, x, x_prev): x_next = module(x, x_prev) if type(x_next) == tuple: x_next, x = x_next if self.can_skip_input and hasattr(module, 'skip_input') and module.skip_input: x = x_prev return x_next, x def nasnet_dual_path_scheme_ordinal(module, x, _): """ NASNet specific scheme of dual path response for an ordinal module with dual inputs/outputs in a DualPathSequential module. Parameters: ---------- module : nn.Module A module. x : Tensor Current processed tensor. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ return module(x), x def nasnet_dual_path_sequential(return_two=True, first_ordinals=0, last_ordinals=0, can_skip_input=False): """ NASNet specific dual path sequential container. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first modules with single input/output. last_ordinals : int, default 0 Number of the final modules with single input/output. dual_path_scheme : function Scheme of dual path response for a module. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal module. can_skip_input : bool, default False Whether can skip input for some modules. """ return DualPathSequential( return_two=return_two, first_ordinals=first_ordinals, last_ordinals=last_ordinals, dual_path_scheme=NasDualPathScheme(can_skip_input=can_skip_input), dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal) def nasnet_batch_norm(channels): """ NASNet specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. """ return nn.BatchNorm2d( num_features=channels, eps=0.001, momentum=0.1, affine=True) def nasnet_avgpool1x1_s2(): """ NASNet specific 1x1 Average pooling layer with stride 2. """ return nn.AvgPool2d( kernel_size=1, stride=2, count_include_pad=False) def nasnet_avgpool3x3_s1(): """ NASNet specific 3x3 Average pooling layer with stride 1. """ return nn.AvgPool2d( kernel_size=3, stride=1, padding=1, count_include_pad=False) def nasnet_avgpool3x3_s2(): """ NASNet specific 3x3 Average pooling layer with stride 2. """ return nn.AvgPool2d( kernel_size=3, stride=2, padding=1, count_include_pad=False) class NasMaxPoolBlock(nn.Module): """ NASNet specific Max pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. 
""" def __init__(self, extra_padding=False): super(NasMaxPoolBlock, self).__init__() self.extra_padding = extra_padding self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) if self.extra_padding: self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0)) def forward(self, x): if self.extra_padding: x = self.pad(x) x = self.pool(x) if self.extra_padding: x = x[:, :, 1:, 1:].contiguous() return x class NasAvgPoolBlock(nn.Module): """ NASNet specific 3x3 Average pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, extra_padding=False): super(NasAvgPoolBlock, self).__init__() self.extra_padding = extra_padding self.pool = nn.AvgPool2d( kernel_size=3, stride=2, padding=1, count_include_pad=False) if self.extra_padding: self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0)) def forward(self, x): if self.extra_padding: x = self.pad(x) x = self.pool(x) if self.extra_padding: x = x[:, :, 1:, 1:].contiguous() return x class NasConv(nn.Module): """ NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups): super(NasConv, self).__init__() self.activ = nn.ReLU() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False) self.bn = nasnet_batch_norm(channels=out_channels) def forward(self, x): x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def nas_conv1x1(in_channels, out_channels): """ 1x1 version of the NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return NasConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, groups=1) class DwsConv(nn.Module): """ Standard depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. bias : bool, default False Whether the layers use a bias vector. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False): super(DwsConv, self).__init__() self.dw_conv = nn.Conv2d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels, bias=bias) self.pw_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, bias=bias) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class NasDwsConv(nn.Module): """ NASNet specific depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. 
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 extra_padding=False):
        super(NasDwsConv, self).__init__()
        self.extra_padding = extra_padding

        self.activ = nn.ReLU()
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False)
        self.bn = nasnet_batch_norm(channels=out_channels)
        if self.extra_padding:
            self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0))

    def forward(self, x):
        x = self.activ(x)
        if self.extra_padding:
            x = self.pad(x)
        x = self.conv(x)
        if self.extra_padding:
            x = x[:, :, 1:, 1:].contiguous()
        x = self.bn(x)
        return x


class DwsBranch(nn.Module):
    """
    NASNet specific block with depthwise separable convolution layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in a stem unit (the intermediate channel count then equals `out_channels`
        instead of `in_channels`).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 extra_padding=False,
                 stem=False):
        super(DwsBranch, self).__init__()
        assert (not stem) or (not extra_padding)
        mid_channels = out_channels if stem else in_channels

        self.conv1 = NasDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            extra_padding=extra_padding)
        self.conv2 = NasDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x


def dws_branch_k3_s1_p1(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    3x3/1/1 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        extra_padding=extra_padding)


def dws_branch_k5_s1_p2(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    5x5/1/2 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=1,
        padding=2,
        extra_padding=extra_padding)


def dws_branch_k5_s2_p2(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False):
    """
    5x5/2/2 version of the NASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in a stem unit.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=2,
        padding=2,
        extra_padding=extra_padding,
        stem=stem)


def dws_branch_k7_s2_p3(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False):
    """
    7x7/2/3 version of the NASNet specific depthwise separable convolution branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in a stem unit.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=2,
        padding=3,
        extra_padding=extra_padding,
        stem=stem)


class NasPathBranch(nn.Module):
    """
    NASNet specific `path` branch (auxiliary block).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 extra_padding=False):
        super(NasPathBranch, self).__init__()
        self.extra_padding = extra_padding

        self.avgpool = nasnet_avgpool1x1_s2()
        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels)
        if self.extra_padding:
            self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1))

    def forward(self, x):
        if self.extra_padding:
            x = self.pad(x)
            x = x[:, :, 1:, 1:].contiguous()
        x = self.avgpool(x)
        x = self.conv(x)
        return x


class NasPathBlock(nn.Module):
    """
    NASNet specific `path` block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(NasPathBlock, self).__init__()
        mid_channels = out_channels // 2

        self.activ = nn.ReLU()
        self.path1 = NasPathBranch(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.path2 = NasPathBranch(
            in_channels=in_channels,
            out_channels=mid_channels,
            extra_padding=True)
        self.bn = nasnet_batch_norm(channels=out_channels)

    def forward(self, x):
        x = self.activ(x)
        x1 = self.path1(x)
        x2 = self.path2(x)
        x = torch.cat((x1, x2), dim=1)
        x = self.bn(x)
        return x


class Stem1Unit(nn.Module):
    """
    NASNet Stem1 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(Stem1Unit, self).__init__()
        mid_channels = out_channels // 4

        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.comb0_left = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.comb0_right = dws_branch_k7_s2_p3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True)
        self.comb1_left = NasMaxPoolBlock(extra_padding=False)
        self.comb1_right = dws_branch_k7_s2_p3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True)
        self.comb2_left = nasnet_avgpool3x3_s2()
        self.comb2_right = dws_branch_k5_s2_p2(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True)
        self.comb3_right = nasnet_avgpool3x3_s1()
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels)
        self.comb4_right = NasMaxPoolBlock(extra_padding=False)

    def forward(self, x, _=None):
        x_left = self.conv1x1(x)
        x_right = x

        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        x1 = self.comb1_left(x_left) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + self.comb2_right(x_right)
        x3 = x1 + self.comb3_right(x0)
        x4 = self.comb4_left(x0) + self.comb4_right(x_left)

        x_out = torch.cat((x1, x2, x3, x4), dim=1)
        return x_out


class Stem2Unit(nn.Module):
    """
    NASNet Stem2 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool
        Whether to use extra padding.
""" def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding): super(Stem2Unit, self).__init__() mid_channels = out_channels // 4 self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding) self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding) self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding) def forward(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.path(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_left) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + self.comb2_right(x_right) x3 = x1 + self.comb3_right(x0) x4 = self.comb4_left(x0) + self.comb4_right(x_left) x_out = torch.cat((x1, x2, x3, x4), dim=1) return x_out class FirstUnit(nn.Module): """ NASNet First unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. """ def __init__(self, in_channels, prev_in_channels, out_channels): super(FirstUnit, self).__init__() mid_channels = out_channels // 6 self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb2_left = nasnet_avgpool3x3_s1() self.comb3_left = nasnet_avgpool3x3_s1() self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) def forward(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.path(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + x_right x3 = self.comb3_left(x_right) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + x_left x_out = torch.cat((x_right, x0, x1, x2, x3, x4), dim=1) return x_out class NormalUnit(nn.Module): """ NASNet Normal unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, prev_in_channels, out_channels): super(NormalUnit, self).__init__() mid_channels = out_channels // 6 self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) self.comb2_left = nasnet_avgpool3x3_s1() self.comb3_left = nasnet_avgpool3x3_s1() self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels) def forward(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.conv1x1_prev(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + x_right x3 = self.comb3_left(x_right) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + x_left x_out = torch.cat((x_right, x0, x1, x2, x3, x4), dim=1) return x_out class ReductionBaseUnit(nn.Module): """ NASNet Reduction base unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool, default True Whether to use extra padding. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding=True): super(ReductionBaseUnit, self).__init__() self.skip_input = True mid_channels = out_channels // 4 self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding) self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding) self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb3_right = nasnet_avgpool3x3_s1() self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding) self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding) def forward(self, x, x_prev): x_left = self.conv1x1(x) x_right = self.conv1x1_prev(x_prev) x0 = self.comb0_left(x_left) + self.comb0_right(x_right) x1 = self.comb1_left(x_left) + self.comb1_right(x_right) x2 = self.comb2_left(x_left) + self.comb2_right(x_right) x3 = x1 + self.comb3_right(x0) x4 = self.comb4_left(x0) + self.comb4_right(x_left) x_out = torch.cat((x1, x2, x3, x4), dim=1) return x_out class Reduction1Unit(ReductionBaseUnit): """ NASNet Reduction1 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, prev_in_channels, out_channels): super(Reduction1Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=True) class Reduction2Unit(ReductionBaseUnit): """ NASNet Reduction2 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool Whether to use extra padding. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding): super(Reduction2Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding) class NASNetInitBlock(nn.Module): """ NASNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(NASNetInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=False) self.bn = nasnet_batch_norm(channels=out_channels) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class NASNet(nn.Module): """ NASNet-A model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. stem_blocks_channels : list of 2 int Number of output channels for the Stem units. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, stem_blocks_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_channels=3, in_size=(224, 224), num_classes=1000): super(NASNet, self).__init__() self.in_size = in_size self.num_classes = num_classes reduction_units = [Reduction1Unit, Reduction2Unit] self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=1, last_ordinals=2) self.features.add_module("init_block", NASNetInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels out_channels = stem_blocks_channels[0] self.features.add_module("stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=out_channels)) prev_in_channels = in_channels in_channels = out_channels out_channels = stem_blocks_channels[1] self.features.add_module("stem2_unit", Stem2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding)) prev_in_channels = in_channels in_channels = out_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential(can_skip_input=skip_reduction_layer_input) for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] elif ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)): unit = FirstUnit else: unit = NormalUnit if unit == Reduction2Unit: stage.add_module("unit{}".format(j + 1), Reduction2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding)) else: stage.add_module("unit{}".format(j + 1), unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels)) prev_in_channels = in_channels in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("activ", nn.ReLU()) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=final_pool_size, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=0.5)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_nasnet(repeat, penultimate_filters, init_block_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_size, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create NASNet-A model with specific parameters. Parameters: ---------- repeat : int NNumber of cell repeats. penultimate_filters : int Number of filters in the penultimate layer of the network. init_block_channels : int Number of output channels for the initial unit. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_size : tuple of two ints Spatial size of the expected input image. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" stem_blocks_channels = [1, 2] reduct_channels = [[], [8], [16]] norm_channels = [6, 12, 24] channels = [rci + [nci] * repeat for rci, nci in zip(reduct_channels, norm_channels)] base_channel_chunk = penultimate_filters // channels[-1][-1] stem_blocks_channels = [(ci * base_channel_chunk) for ci in stem_blocks_channels] channels = [[(cij * base_channel_chunk) for cij in ci] for ci in channels] net = NASNet( channels=channels, init_block_channels=init_block_channels, stem_blocks_channels=stem_blocks_channels, final_pool_size=final_pool_size, extra_padding=extra_padding, skip_reduction_layer_input=skip_reduction_layer_input, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def nasnet_4a1056(**kwargs): """ NASNet-A 4@1056 (NASNet-A-Mobile) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nasnet( repeat=4, penultimate_filters=1056, init_block_channels=32, final_pool_size=7, extra_padding=True, skip_reduction_layer_input=False, in_size=(224, 224), model_name="nasnet_4a1056", **kwargs) def nasnet_6a4032(**kwargs): """ NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nasnet( repeat=6, penultimate_filters=4032, init_block_channels=96, final_pool_size=11, extra_padding=False, skip_reduction_layer_input=True, in_size=(331, 331), model_name="nasnet_6a4032", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ nasnet_4a1056, nasnet_6a4032, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != nasnet_4a1056 or weight_count == 5289978) assert (model != nasnet_6a4032 or weight_count == 88753150) x = torch.randn(1, 3, net.in_size[0], net.in_size[1]) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
38,588
28.502294
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnext_cifar.py
""" ResNeXt for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['CIFARResNeXt', 'resnext20_16x4d_cifar10', 'resnext20_16x4d_cifar100', 'resnext20_16x4d_svhn', 'resnext20_32x2d_cifar10', 'resnext20_32x2d_cifar100', 'resnext20_32x2d_svhn', 'resnext20_32x4d_cifar10', 'resnext20_32x4d_cifar100', 'resnext20_32x4d_svhn', 'resnext29_32x4d_cifar10', 'resnext29_32x4d_cifar100', 'resnext29_32x4d_svhn', 'resnext29_16x64d_cifar10', 'resnext29_16x64d_cifar100', 'resnext29_16x64d_svhn', 'resnext272_1x64d_cifar10', 'resnext272_1x64d_cifar100', 'resnext272_1x64d_svhn', 'resnext272_2x32d_cifar10', 'resnext272_2x32d_cifar100', 'resnext272_2x32d_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .resnext import ResNeXtUnit class CIFARResNeXt(nn.Module): """ ResNeXt model for CIFAR from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARResNeXt, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnext_cifar(num_classes, blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ ResNeXt model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (blocks - 2) % 9 == 0 layers = [(blocks - 2) // 9] * 3 channels_per_layers = [256, 512, 1024] init_block_channels = 64 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnext20_16x4d_cifar10(num_classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar10", **kwargs) def resnext20_16x4d_cifar100(num_classes=100, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar100", **kwargs) def resnext20_16x4d_svhn(num_classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_svhn", **kwargs) def resnext20_32x2d_cifar10(num_classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar10", **kwargs) def resnext20_32x2d_cifar100(num_classes=100, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar100", **kwargs) def resnext20_32x2d_svhn(num_classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_svhn", **kwargs) def resnext20_32x4d_cifar10(num_classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar10", **kwargs) def resnext20_32x4d_cifar100(num_classes=100, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar100", **kwargs) def resnext20_32x4d_svhn(num_classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_svhn", **kwargs) def resnext29_32x4d_cifar10(num_classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar10", **kwargs) def resnext29_32x4d_cifar100(num_classes=100, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar100", **kwargs) def resnext29_32x4d_svhn(num_classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_svhn", **kwargs) def resnext29_16x64d_cifar10(num_classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar10", **kwargs) def resnext29_16x64d_cifar100(num_classes=100, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar100", **kwargs) def resnext29_16x64d_svhn(num_classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_svhn", **kwargs) def resnext272_1x64d_cifar10(num_classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar10", **kwargs) def resnext272_1x64d_cifar100(num_classes=100, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar100", **kwargs) def resnext272_1x64d_svhn(num_classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_svhn", **kwargs) def resnext272_2x32d_cifar10(num_classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar10", **kwargs) def resnext272_2x32d_cifar100(num_classes=100, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar100", **kwargs) def resnext272_2x32d_svhn(num_classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(num_classes=num_classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (resnext20_16x4d_cifar10, 10), (resnext20_16x4d_cifar100, 100), (resnext20_16x4d_svhn, 10), (resnext20_32x2d_cifar10, 10), (resnext20_32x2d_cifar100, 100), (resnext20_32x2d_svhn, 10), (resnext20_32x4d_cifar10, 10), (resnext20_32x4d_cifar100, 100), (resnext20_32x4d_svhn, 10), (resnext29_32x4d_cifar10, 10), (resnext29_32x4d_cifar100, 100), (resnext29_32x4d_svhn, 10), (resnext29_16x64d_cifar10, 10), (resnext29_16x64d_cifar100, 100), (resnext29_16x64d_svhn, 10), (resnext272_1x64d_cifar10, 10), (resnext272_1x64d_cifar100, 100), (resnext272_1x64d_svhn, 10), (resnext272_2x32d_cifar10, 10), (resnext272_2x32d_cifar100, 100), (resnext272_2x32d_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext20_16x4d_cifar10 or weight_count == 1995082) assert (model != resnext20_16x4d_cifar100 or weight_count == 2087332) assert (model != resnext20_16x4d_svhn or weight_count == 1995082) assert (model != resnext20_32x2d_cifar10 or weight_count == 1946698) assert (model != resnext20_32x2d_cifar100 or weight_count == 2038948) assert (model != resnext20_32x2d_svhn or weight_count == 1946698) assert (model != resnext20_32x4d_cifar10 or weight_count == 3295562) assert (model != resnext20_32x4d_cifar100 or weight_count == 3387812) assert (model != resnext20_32x4d_svhn or weight_count == 3295562) assert (model != resnext29_32x4d_cifar10 or weight_count == 4775754) assert (model != resnext29_32x4d_cifar100 or weight_count == 4868004) assert (model != resnext29_32x4d_svhn or weight_count == 4775754) assert (model != resnext29_16x64d_cifar10 or weight_count == 68155210) assert (model != resnext29_16x64d_cifar100 or weight_count == 68247460) assert (model != resnext29_16x64d_svhn or weight_count == 68155210) assert (model != resnext272_1x64d_cifar10 or weight_count == 44540746) assert (model != resnext272_1x64d_cifar100 or weight_count == 44632996) assert (model != resnext272_1x64d_svhn or weight_count == 44540746) assert (model != resnext272_2x32d_cifar10 or weight_count == 32928586) assert (model != resnext272_2x32d_cifar100 or weight_count == 33020836) assert (model != resnext272_2x32d_svhn or weight_count == 32928586) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
23,083
37.092409
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/densenet_cifar.py
""" DenseNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_svhn', 'densenet40_k12_bc_cifar10', 'densenet40_k12_bc_cifar100', 'densenet40_k12_bc_svhn', 'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100', 'densenet40_k24_bc_svhn', 'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet40_k36_bc_svhn', 'densenet100_k12_cifar10', 'densenet100_k12_cifar100', 'densenet100_k12_svhn', 'densenet100_k24_cifar10', 'densenet100_k24_cifar100', 'densenet100_k24_svhn', 'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100', 'densenet100_k12_bc_svhn', 'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet190_k40_bc_svhn', 'densenet250_k24_bc_cifar10', 'densenet250_k24_bc_cifar100', 'densenet250_k24_bc_svhn'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv3x3, pre_conv3x3_block from .preresnet import PreResActivation from .densenet import DenseUnit, TransitionBlock class DenseSimpleUnit(nn.Module): """ DenseNet simple unit for CIFAR. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(DenseSimpleUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) inc_channels = out_channels - in_channels self.conv = pre_conv3x3_block( in_channels=in_channels, out_channels=inc_channels) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): identity = x x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = torch.cat((identity, x), dim=1) return x class CIFARDenseNet(nn.Module): """ DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARDenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes unit_class = DenseUnit if bottleneck else DenseSimpleUnit self.features = nn.Sequential() self.features.add_module("init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), unit_class( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_densenet_cifar(num_classes, blocks, growth_rate, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DenseNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. growth_rate : int Growth rate. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 else: assert ((blocks - 4) % 3 == 0) layers = [(blocks - 4) // 3] * 3 init_block_channels = 2 * growth_rate from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = CIFARDenseNet( channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def densenet40_k12_cifar10(num_classes=10, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar10", **kwargs) def densenet40_k12_cifar100(num_classes=100, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar100", **kwargs) def densenet40_k12_svhn(num_classes=10, **kwargs): """ DenseNet-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_svhn", **kwargs) def densenet40_k12_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar10", **kwargs) def densenet40_k12_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar100", **kwargs) def densenet40_k12_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_svhn", **kwargs) def densenet40_k24_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar10", **kwargs) def densenet40_k24_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar100", **kwargs) def densenet40_k24_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_svhn", **kwargs) def densenet40_k36_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar10", **kwargs) def densenet40_k36_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar100", **kwargs) def densenet40_k36_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_svhn", **kwargs) def densenet100_k12_cifar10(num_classes=10, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar10", **kwargs) def densenet100_k12_cifar100(num_classes=100, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar100", **kwargs) def densenet100_k12_svhn(num_classes=10, **kwargs): """ DenseNet-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_svhn", **kwargs) def densenet100_k24_cifar10(num_classes=10, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar10", **kwargs) def densenet100_k24_cifar100(num_classes=100, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar100", **kwargs) def densenet100_k24_svhn(num_classes=10, **kwargs): """ DenseNet-100 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_svhn", **kwargs) def densenet100_k12_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar10", **kwargs) def densenet100_k12_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar100", **kwargs) def densenet100_k12_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_svhn", **kwargs) def densenet190_k40_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar10", **kwargs) def densenet190_k40_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar100", **kwargs) def densenet190_k40_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_svhn", **kwargs) def densenet250_k24_bc_cifar10(num_classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(num_classes=num_classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar10", **kwargs) def densenet250_k24_bc_cifar100(num_classes=100, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar100", **kwargs) def densenet250_k24_bc_svhn(num_classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet_cifar(num_classes=num_classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (densenet40_k12_cifar10, 10), (densenet40_k12_cifar100, 100), (densenet40_k12_svhn, 10), (densenet40_k12_bc_cifar10, 10), (densenet40_k12_bc_cifar100, 100), (densenet40_k12_bc_svhn, 10), (densenet40_k24_bc_cifar10, 10), (densenet40_k24_bc_cifar100, 100), (densenet40_k24_bc_svhn, 10), (densenet40_k36_bc_cifar10, 10), (densenet40_k36_bc_cifar100, 100), (densenet40_k36_bc_svhn, 10), (densenet100_k12_cifar10, 10), (densenet100_k12_cifar100, 100), (densenet100_k12_svhn, 10), (densenet100_k24_cifar10, 10), (densenet100_k24_cifar100, 100), (densenet100_k24_svhn, 10), (densenet100_k12_bc_cifar10, 10), (densenet100_k12_bc_cifar100, 100), (densenet100_k12_bc_svhn, 10), (densenet190_k40_bc_cifar10, 10), (densenet190_k40_bc_cifar100, 100), (densenet190_k40_bc_svhn, 10), (densenet250_k24_bc_cifar10, 10), (densenet250_k24_bc_cifar100, 100), (densenet250_k24_bc_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet40_k12_cifar10 or weight_count == 599050) assert (model != densenet40_k12_cifar100 or weight_count == 622360) assert (model != densenet40_k12_svhn or weight_count == 599050) assert (model != densenet40_k12_bc_cifar10 or weight_count == 176122) assert (model != densenet40_k12_bc_cifar100 or weight_count == 188092) assert (model != densenet40_k12_bc_svhn or weight_count == 176122) assert (model != densenet40_k24_bc_cifar10 or weight_count == 690346) assert (model != densenet40_k24_bc_cifar100 or weight_count == 714196) assert (model != densenet40_k24_bc_svhn or weight_count == 690346) assert (model != densenet40_k36_bc_cifar10 or weight_count == 1542682) assert (model != densenet40_k36_bc_cifar100 or weight_count == 1578412) assert (model != densenet40_k36_bc_svhn or weight_count == 1542682) assert (model != densenet100_k12_cifar10 or weight_count == 4068490) assert (model != 
densenet100_k12_cifar100 or weight_count == 4129600) assert (model != densenet100_k12_svhn or weight_count == 4068490) assert (model != densenet100_k24_cifar10 or weight_count == 16114138) assert (model != densenet100_k24_cifar100 or weight_count == 16236268) assert (model != densenet100_k24_svhn or weight_count == 16114138) assert (model != densenet100_k12_bc_cifar10 or weight_count == 769162) assert (model != densenet100_k12_bc_cifar100 or weight_count == 800032) assert (model != densenet100_k12_bc_svhn or weight_count == 769162) assert (model != densenet190_k40_bc_cifar10 or weight_count == 25624430) assert (model != densenet190_k40_bc_cifar100 or weight_count == 25821620) assert (model != densenet190_k40_bc_svhn or weight_count == 25624430) assert (model != densenet250_k24_bc_cifar10 or weight_count == 15324406) assert (model != densenet250_k24_bc_cifar100 or weight_count == 15480556) assert (model != densenet250_k24_bc_svhn or weight_count == 15324406) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
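Clarification sketch (added; not part of the original file): the nested `reduce` in `get_densenet_cifar` builds the per-stage channel schedule; an equivalent explicit loop under the same `blocks`/`growth_rate` semantics, with the helper name being hypothetical:

def densenet_cifar_channels(blocks, growth_rate, bottleneck):
    # Same unit counts as get_densenet_cifar above.
    layers = [(blocks - 4) // (6 if bottleneck else 3)] * 3
    init_block_channels = 2 * growth_rate
    channels = []
    in_ch = init_block_channels
    for num_units in layers:
        if channels:
            in_ch = channels[-1][-1] // 2   # transition halves the width
        stage = []
        for _ in range(num_units):
            in_ch += growth_rate            # each dense unit adds growth_rate
            stage.append(in_ch)
        channels.append(stage)
    return channels

# densenet40_k12: stages run 36..168, 96..228, 126..258 in steps of 12.
assert densenet_cifar_channels(40, 12, False)[0][:3] == [36, 48, 60]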
29,468
36.780769
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/bninception.py
""" BN-Inception for ImageNet-1K, implemented in PyTorch. Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. """ __all__ = ['BNInception', 'bninception'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, conv7x7_block, Concurrent class Inception3x3Branch(nn.Module): """ BN-Inception 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Strides of the second convolution. bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. """ def __init__(self, in_channels, out_channels, mid_channels, stride=1, bias=True, use_bn=True): super(Inception3x3Branch, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class InceptionDouble3x3Branch(nn.Module): """ BN-Inception double 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Strides of the second convolution. bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. """ def __init__(self, in_channels, out_channels, mid_channels, stride=1, bias=True, use_bn=True): super(InceptionDouble3x3Branch, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, bias=bias, use_bn=use_bn) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class InceptionPoolBranch(nn.Module): """ BN-Inception avg-pool branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. avg_pool : bool Whether use average pooling or max pooling. bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. """ def __init__(self, in_channels, out_channels, avg_pool, bias, use_bn): super(InceptionPoolBranch, self).__init__() if avg_pool: self.pool = nn.AvgPool2d( kernel_size=3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) else: self.pool = nn.MaxPool2d( kernel_size=3, stride=1, padding=1, ceil_mode=True) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class StemBlock(nn.Module): """ BN-Inception stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. 
""" def __init__(self, in_channels, out_channels, mid_channels, bias, use_bn): super(StemBlock, self).__init__() self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid_channels, stride=2, bias=bias, use_bn=use_bn) self.pool1 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0, ceil_mode=True) self.conv2 = Inception3x3Branch( in_channels=mid_channels, out_channels=out_channels, mid_channels=mid_channels) self.pool2 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0, ceil_mode=True) def forward(self, x): x = self.conv1(x) x = self.pool1(x) x = self.conv2(x) x = self.pool2(x) return x class InceptionBlock(nn.Module): """ BN-Inception unit. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. avg_pool : bool Whether use average pooling or max pooling. bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. """ def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, avg_pool, bias, use_bn): super(InceptionBlock, self).__init__() assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) self.branches = Concurrent() self.branches.add_module("branch1", conv1x1_block( in_channels=in_channels, out_channels=mid2_channels_list[0], bias=bias, use_bn=use_bn)) self.branches.add_module("branch2", Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], bias=bias, use_bn=use_bn)) self.branches.add_module("branch3", InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], bias=bias, use_bn=use_bn)) self.branches.add_module("branch4", InceptionPoolBranch( in_channels=in_channels, out_channels=mid2_channels_list[3], avg_pool=avg_pool, bias=bias, use_bn=use_bn)) def forward(self, x): x = self.branches(x) return x class ReductionBlock(nn.Module): """ BN-Inception reduction block. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. """ def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, bias, use_bn): super(ReductionBlock, self).__init__() assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) self.branches = Concurrent() self.branches.add_module("branch1", Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], stride=2, bias=bias, use_bn=use_bn)) self.branches.add_module("branch2", InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], stride=2, bias=bias, use_bn=use_bn)) self.branches.add_module("branch3", nn.MaxPool2d( kernel_size=3, stride=2, padding=0, ceil_mode=True)) def forward(self, x): x = self.branches(x) return x class BNInception(nn.Module): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels_list : list of int Number of output channels for the initial unit. 
mid1_channels_list : list of list of list of int Number of pre-middle channels for each unit. mid2_channels_list : list of list of list of int Number of middle channels for each unit. bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels_list, mid1_channels_list, mid2_channels_list, bias=True, use_bn=True, in_channels=3, in_size=(224, 224), num_classes=1000): super(BNInception, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", StemBlock( in_channels=in_channels, out_channels=init_block_channels_list[1], mid_channels=init_block_channels_list[0], bias=bias, use_bn=use_bn)) in_channels = init_block_channels_list[-1] for i, channels_per_stage in enumerate(channels): mid1_channels_list_i = mid1_channels_list[i] mid2_channels_list_i = mid2_channels_list[i] stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): stage.add_module("unit{}".format(j + 1), ReductionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], bias=bias, use_bn=use_bn)) else: avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1) stage.add_module("unit{}".format(j + 1), InceptionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], avg_pool=avg_pool, bias=bias, use_bn=use_bn)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_bninception(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create BN-Inception model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" init_block_channels_list = [64, 192] channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]] mid1_channels_list = [ [[64, 64], [64, 64]], [[128, 64], # 3c [64, 96], # 4a [96, 96], # 4a [128, 128], # 4c [128, 160]], # 4d [[128, 192], # 4e [192, 160], # 5a [192, 192]], ] mid2_channels_list = [ [[64, 64, 96, 32], [64, 96, 96, 64]], [[0, 160, 96, 0], # 3c [224, 96, 128, 128], # 4a [192, 128, 128, 128], # 4b [160, 160, 160, 128], # 4c [96, 192, 192, 128]], # 4d [[0, 192, 256, 0], # 4e [352, 320, 224, 128], # 5a [352, 320, 224, 128]], ] net = BNInception( channels=channels, init_block_channels_list=init_block_channels_list, mid1_channels_list=mid1_channels_list, mid2_channels_list=mid2_channels_list, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def bninception(**kwargs): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_bninception(model_name="bninception", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ bninception, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != bninception or weight_count == 11295240) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
16,280
29.488764
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/msdnet.py
""" MSDNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. """ __all__ = ['MSDNet', 'msdnet22', 'MultiOutputSequential', 'MSDFeatureBlock'] import os import math import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResInitBlock class MultiOutputSequential(nn.Sequential): """ A sequential container for modules. Modules will be executed in the order they are added. Output value contains results from all modules. """ def __init__(self, *args): super(MultiOutputSequential, self).__init__(*args) def forward(self, x): outs = [] for module in self._modules.values(): x = module(x) outs.append(x) return outs class MultiBlockSequential(nn.Sequential): """ A sequential container for modules. Modules will be executed in the order they are added. Input is a list with length equal to number of modules. """ def __init__(self, *args): super(MultiBlockSequential, self).__init__(*args) def forward(self, x): outs = [] for module, x_i in zip(self._modules.values(), x): y = module(x_i) outs.append(y) return outs class MSDBaseBlock(nn.Module): """ MSDNet base block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factor : int Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, use_bottleneck, bottleneck_factor): super(MSDBaseBlock, self).__init__() self.use_bottleneck = use_bottleneck mid_channels = min(in_channels, bottleneck_factor * out_channels) if use_bottleneck else in_channels if self.use_bottleneck: self.bn_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=stride) def forward(self, x): if self.use_bottleneck: x = self.bn_conv(x) x = self.conv(x) return x class MSDFirstScaleBlock(nn.Module): """ MSDNet first scale dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factor : int Bottleneck factor. """ def __init__(self, in_channels, out_channels, use_bottleneck, bottleneck_factor): super(MSDFirstScaleBlock, self).__init__() assert (out_channels > in_channels) inc_channels = out_channels - in_channels self.block = MSDBaseBlock( in_channels=in_channels, out_channels=inc_channels, stride=1, use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factor) def forward(self, x): y = self.block(x) y = torch.cat((x, y), dim=1) return y class MSDScaleBlock(nn.Module): """ MSDNet ordinary scale dense block. Parameters: ---------- in_channels_prev : int Number of input channels for the previous scale. in_channels : int Number of input channels for the current scale. out_channels : int Number of output channels. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factor_prev : int Bottleneck factor for the previous scale. bottleneck_factor : int Bottleneck factor for the current scale. 
""" def __init__(self, in_channels_prev, in_channels, out_channels, use_bottleneck, bottleneck_factor_prev, bottleneck_factor): super(MSDScaleBlock, self).__init__() assert (out_channels > in_channels) assert (out_channels % 2 == 0) inc_channels = out_channels - in_channels mid_channels = inc_channels // 2 self.down_block = MSDBaseBlock( in_channels=in_channels_prev, out_channels=mid_channels, stride=2, use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factor_prev) self.curr_block = MSDBaseBlock( in_channels=in_channels, out_channels=mid_channels, stride=1, use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factor) def forward(self, x_prev, x): y_prev = self.down_block(x_prev) y = self.curr_block(x) x = torch.cat((x, y_prev, y), dim=1) return x class MSDInitLayer(nn.Module): """ MSDNet initial (so-called first) layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : list/tuple of int Number of output channels for each scale. """ def __init__(self, in_channels, out_channels): super(MSDInitLayer, self).__init__() self.scale_blocks = MultiOutputSequential() for i, out_channels_per_scale in enumerate(out_channels): if i == 0: self.scale_blocks.add_module("scale_block{}".format(i + 1), ResInitBlock( in_channels=in_channels, out_channels=out_channels_per_scale)) else: self.scale_blocks.add_module("scale_block{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels_per_scale, stride=2)) in_channels = out_channels_per_scale def forward(self, x): y = self.scale_blocks(x) return y class MSDLayer(nn.Module): """ MSDNet ordinary layer. Parameters: ---------- in_channels : list/tuple of int Number of input channels for each input scale. out_channels : list/tuple of int Number of output channels for each output scale. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factors : list/tuple of int Bottleneck factor for each input scale. """ def __init__(self, in_channels, out_channels, use_bottleneck, bottleneck_factors): super(MSDLayer, self).__init__() in_scales = len(in_channels) out_scales = len(out_channels) self.dec_scales = in_scales - out_scales assert (self.dec_scales >= 0) self.scale_blocks = nn.Sequential() for i in range(out_scales): if (i == 0) and (self.dec_scales == 0): self.scale_blocks.add_module("scale_block{}".format(i + 1), MSDFirstScaleBlock( in_channels=in_channels[self.dec_scales + i], out_channels=out_channels[i], use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factors[self.dec_scales + i])) else: self.scale_blocks.add_module("scale_block{}".format(i + 1), MSDScaleBlock( in_channels_prev=in_channels[self.dec_scales + i - 1], in_channels=in_channels[self.dec_scales + i], out_channels=out_channels[i], use_bottleneck=use_bottleneck, bottleneck_factor_prev=bottleneck_factors[self.dec_scales + i - 1], bottleneck_factor=bottleneck_factors[self.dec_scales + i])) def forward(self, x): outs = [] for i in range(len(self.scale_blocks)): if (i == 0) and (self.dec_scales == 0): y = self.scale_blocks[i](x[i]) else: y = self.scale_blocks[i]( x_prev=x[self.dec_scales + i - 1], x=x[self.dec_scales + i]) outs.append(y) return outs class MSDTransitionLayer(nn.Module): """ MSDNet transition layer. Parameters: ---------- in_channels : list/tuple of int Number of input channels for each scale. out_channels : list/tuple of int Number of output channels for each scale. 
""" def __init__(self, in_channels, out_channels): super(MSDTransitionLayer, self).__init__() assert (len(in_channels) == len(out_channels)) self.scale_blocks = MultiBlockSequential() for i in range(len(out_channels)): self.scale_blocks.add_module("scale_block{}".format(i + 1), conv1x1_block( in_channels=in_channels[i], out_channels=out_channels[i])) def forward(self, x): y = self.scale_blocks(x) return y class MSDFeatureBlock(nn.Module): """ MSDNet feature block (stage of cascade, so-called block). Parameters: ---------- in_channels : list of list of int Number of input channels for each layer and for each input scale. out_channels : list of list of int Number of output channels for each layer and for each output scale. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factors : list of list of int Bottleneck factor for each layer and for each input scale. """ def __init__(self, in_channels, out_channels, use_bottleneck, bottleneck_factors): super(MSDFeatureBlock, self).__init__() self.blocks = nn.Sequential() for i, out_channels_per_layer in enumerate(out_channels): if len(bottleneck_factors[i]) == 0: self.blocks.add_module("trans{}".format(i + 1), MSDTransitionLayer( in_channels=in_channels, out_channels=out_channels_per_layer)) else: self.blocks.add_module("layer{}".format(i + 1), MSDLayer( in_channels=in_channels, out_channels=out_channels_per_layer, use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors[i])) in_channels = out_channels_per_layer def forward(self, x): x = self.blocks(x) return x class MSDClassifier(nn.Module): """ MSDNet classifier. Parameters: ---------- in_channels : int Number of input channels. num_classes : int Number of classification classes. """ def __init__(self, in_channels, num_classes): super(MSDClassifier, self).__init__() self.features = nn.Sequential() self.features.add_module("conv1", conv3x3_block( in_channels=in_channels, out_channels=in_channels, stride=2)) self.features.add_module("conv2", conv3x3_block( in_channels=in_channels, out_channels=in_channels, stride=2)) self.features.add_module("pool", nn.AvgPool2d( kernel_size=2, stride=2)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x class MSDNet(nn.Module): """ MSDNet model from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. Parameters: ---------- channels : list of list of list of int Number of output channels for each unit. init_layer_channels : list of int Number of output channels for the initial layer. num_feature_blocks : int Number of subnets. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factors : list of list of int Bottleneck factor for each layers and for each input scale. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_layer_channels, num_feature_blocks, use_bottleneck, bottleneck_factors, in_channels=3, in_size=(224, 224), num_classes=1000): super(MSDNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.init_layer = MSDInitLayer( in_channels=in_channels, out_channels=init_layer_channels) in_channels = init_layer_channels self.feature_blocks = nn.Sequential() self.classifiers = nn.Sequential() for i in range(num_feature_blocks): self.feature_blocks.add_module("block{}".format(i + 1), MSDFeatureBlock( in_channels=in_channels, out_channels=channels[i], use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors[i])) in_channels = channels[i][-1] self.classifiers.add_module("classifier{}".format(i + 1), MSDClassifier( in_channels=in_channels[-1], num_classes=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x, only_last=True): x = self.init_layer(x) outs = [] for feature_block, classifier in zip(self.feature_blocks, self.classifiers): x = feature_block(x) y = classifier(x[-1]) outs.append(y) if only_last: return outs[-1] else: return outs def get_msdnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MSDNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (blocks == 22) num_scales = 4 num_feature_blocks = 10 base = 4 step = 2 reduction_rate = 0.5 growth = 6 growth_factor = [1, 2, 4, 4] use_bottleneck = True bottleneck_factor_per_scales = [1, 2, 4, 4] assert (reduction_rate > 0.0) init_layer_channels = [64 * c for c in growth_factor[:num_scales]] step_mode = "even" layers_per_subnets = [base] for i in range(num_feature_blocks - 1): layers_per_subnets.append(step if step_mode == 'even' else step * i + 1) total_layers = sum(layers_per_subnets) interval = math.ceil(total_layers / num_scales) global_layer_ind = 0 channels = [] bottleneck_factors = [] in_channels_tmp = init_layer_channels in_scales = num_scales for i in range(num_feature_blocks): layers_per_subnet = layers_per_subnets[i] scales_i = [] channels_i = [] bottleneck_factors_i = [] for j in range(layers_per_subnet): out_scales = int(num_scales - math.floor(global_layer_ind / interval)) global_layer_ind += 1 scales_i += [out_scales] scale_offset = num_scales - out_scales in_dec_scales = num_scales - len(in_channels_tmp) out_channels = [in_channels_tmp[scale_offset - in_dec_scales + k] + growth * growth_factor[scale_offset + k] for k in range(out_scales)] in_dec_scales = num_scales - len(in_channels_tmp) bottleneck_factors_ij = bottleneck_factor_per_scales[in_dec_scales:][:len(in_channels_tmp)] in_channels_tmp = out_channels channels_i += [out_channels] bottleneck_factors_i += [bottleneck_factors_ij] if in_scales > out_scales: assert (in_channels_tmp[0] % growth_factor[scale_offset] == 0) out_channels1 = int(math.floor(in_channels_tmp[0] / growth_factor[scale_offset] * reduction_rate)) out_channels = [out_channels1 * growth_factor[scale_offset + k] for k in range(out_scales)] in_channels_tmp = out_channels channels_i += [out_channels] 
bottleneck_factors_i += [[]] in_scales = out_scales in_scales = scales_i[-1] channels += [channels_i] bottleneck_factors += [bottleneck_factors_i] net = MSDNet( channels=channels, init_layer_channels=init_layer_channels, num_feature_blocks=num_feature_blocks, use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def msdnet22(**kwargs): """ MSDNet-22 model from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_msdnet(blocks=22, model_name="msdnet22", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ msdnet22, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != msdnet22 or weight_count == 20106676) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
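For reference, a minimal anytime-inference sketch for the model above (assuming the installed pytorchcv package layout; the network here is randomly initialized, so the outputs are illustrative only):

import torch
from pytorchcv.models.msdnet import msdnet22

net = msdnet22()  # pass pretrained=True to load published weights
net.eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outs = net(x, only_last=False)  # one prediction per feature block (early exits)
print(len(outs))             # 10 classifiers
print(tuple(outs[0].shape))  # (1, 1000) at the earliest exit

With the default only_last=True, only the final classifier's logits are returned.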
19,529
30.65316
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/zfnet.py
""" ZFNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. """ __all__ = ['zfnet', 'zfnetb'] import os from .alexnet import AlexNet def get_zfnet(version="a", model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ZFNet model with specific parameters. Parameters: ---------- version : str, default 'a' Version of ZFNet ('a' or 'b'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "a": channels = [[96], [256], [384, 384, 256]] kernel_sizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] paddings = [[1], [0], [1, 1, 1]] use_lrn = True elif version == "b": channels = [[96], [256], [512, 1024, 512]] kernel_sizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] paddings = [[1], [0], [1, 1, 1]] use_lrn = True else: raise ValueError("Unsupported ZFNet version {}".format(version)) net = AlexNet( channels=channels, kernel_sizes=kernel_sizes, strides=strides, paddings=paddings, use_lrn=use_lrn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def zfnet(**kwargs): """ ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_zfnet(model_name="zfnet", **kwargs) def zfnetb(**kwargs): """ ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_zfnet(version="b", model_name="zfnetb", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ zfnet, zfnetb, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != zfnet or weight_count == 62357608) assert (model != zfnetb or weight_count == 107627624) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
3,659
26.727273
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/peleenet.py
""" PeleeNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. """ __all__ = ['PeleeNet', 'peleenet'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, Concurrent class PeleeBranch1(nn.Module): """ PeleeNet branch type 1 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. stride : int or tuple/list of 2 int, default 1 Strides of the second convolution. """ def __init__(self, in_channels, out_channels, mid_channels, stride=1): super(PeleeBranch1, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=stride) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class PeleeBranch2(nn.Module): """ PeleeNet branch type 2 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(PeleeBranch2, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class StemBlock(nn.Module): """ PeleeNet stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(StemBlock, self).__init__() mid1_channels = out_channels // 2 mid2_channels = out_channels * 2 self.first_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.branches = Concurrent() self.branches.add_module("branch1", PeleeBranch1( in_channels=out_channels, out_channels=out_channels, mid_channels=mid1_channels, stride=2)) self.branches.add_module("branch2", nn.MaxPool2d( kernel_size=2, stride=2, padding=0)) self.last_conv = conv1x1_block( in_channels=mid2_channels, out_channels=out_channels) def forward(self, x): x = self.first_conv(x) x = self.branches(x) x = self.last_conv(x) return x class DenseBlock(nn.Module): """ PeleeNet dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_size : int Bottleneck width. """ def __init__(self, in_channels, out_channels, bottleneck_size): super(DenseBlock, self).__init__() inc_channels = (out_channels - in_channels) // 2 mid_channels = inc_channels * bottleneck_size self.branch1 = PeleeBranch1( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) self.branch2 = PeleeBranch2( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) def forward(self, x): x1 = self.branch1(x) x2 = self.branch2(x) x = torch.cat((x, x1, x2), dim=1) return x class TransitionBlock(nn.Module): """ PeleeNet's transition block, like in DensNet, but with ordinary convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(TransitionBlock, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.pool = nn.AvgPool2d( kernel_size=2, stride=2, padding=0) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class PeleeNet(nn.Module): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck_sizes : list of int Bottleneck sizes for each stage. dropout_rate : float, default 0.5 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck_sizes, dropout_rate=0.5, in_channels=3, in_size=(224, 224), num_classes=1000): super(PeleeNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", StemBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): bottleneck_size = bottleneck_sizes[i] stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=in_channels)) for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), DenseBlock( in_channels=in_channels, out_channels=out_channels, bottleneck_size=bottleneck_size)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_peleenet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PeleeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" init_block_channels = 32 growth_rate = 32 layers = [3, 4, 8, 6] bottleneck_sizes = [1, 2, 4, 4] from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1]])[1:]], layers, [[init_block_channels]])[1:] net = PeleeNet( channels=channels, init_block_channels=init_block_channels, bottleneck_sizes=bottleneck_sizes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def peleenet(**kwargs): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_peleenet(model_name="peleenet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ peleenet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != peleenet or weight_count == 2802248) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
10,823
27.710875
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/msdnet_cifar10.py
""" MSDNet for CIFAR-10, implemented in PyTorch. Original paper: 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. """ __all__ = ['CIFAR10MSDNet', 'msdnet22_cifar10'] import os import math import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .msdnet import MultiOutputSequential, MSDFeatureBlock class CIFAR10MSDInitLayer(nn.Module): """ MSDNet initial (so-called first) layer for CIFAR-10. Parameters: ---------- in_channels : int Number of input channels. out_channels : list/tuple of int Number of output channels for each scale. """ def __init__(self, in_channels, out_channels): super(CIFAR10MSDInitLayer, self).__init__() self.scale_blocks = MultiOutputSequential() for i, out_channels_per_scale in enumerate(out_channels): stride = 1 if i == 0 else 2 self.scale_blocks.add_module("scale_block{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels_per_scale, stride=stride)) in_channels = out_channels_per_scale def forward(self, x): y = self.scale_blocks(x) return y class CIFAR10MSDClassifier(nn.Module): """ MSDNet classifier for CIFAR-10. Parameters: ---------- in_channels : int Number of input channels. num_classes : int Number of classification classes. """ def __init__(self, in_channels, num_classes): super(CIFAR10MSDClassifier, self).__init__() mid_channels = 128 self.features = nn.Sequential() self.features.add_module("conv1", conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2)) self.features.add_module("conv2", conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=2)) self.features.add_module("pool", nn.AvgPool2d( kernel_size=2, stride=2)) self.output = nn.Linear( in_features=mid_channels, out_features=num_classes) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x class CIFAR10MSDNet(nn.Module): """ MSDNet model for CIFAR-10 from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. Parameters: ---------- channels : list of list of list of int Number of output channels for each unit. init_layer_channels : list of int Number of output channels for the initial layer. num_feature_blocks : int Number of subnets. use_bottleneck : bool Whether to use a bottleneck. bottleneck_factors : list of list of int Bottleneck factor for each layers and for each input scale. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_layer_channels, num_feature_blocks, use_bottleneck, bottleneck_factors, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFAR10MSDNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.init_layer = CIFAR10MSDInitLayer( in_channels=in_channels, out_channels=init_layer_channels) in_channels = init_layer_channels self.feature_blocks = nn.Sequential() self.classifiers = nn.Sequential() for i in range(num_feature_blocks): self.feature_blocks.add_module("block{}".format(i + 1), MSDFeatureBlock( in_channels=in_channels, out_channels=channels[i], use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors[i])) in_channels = channels[i][-1] self.classifiers.add_module("classifier{}".format(i + 1), CIFAR10MSDClassifier( in_channels=in_channels[-1], num_classes=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x, only_last=True): x = self.init_layer(x) outs = [] for feature_block, classifier in zip(self.feature_blocks, self.classifiers): x = feature_block(x) y = classifier(x[-1]) outs.append(y) if only_last: return outs[-1] else: return outs def get_msdnet_cifar10(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MSDNet model for CIFAR-10 with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (blocks == 22) num_scales = 3 num_feature_blocks = 10 base = 4 step = 2 reduction_rate = 0.5 growth = 6 growth_factor = [1, 2, 4, 4] use_bottleneck = True bottleneck_factor_per_scales = [1, 2, 4, 4] assert (reduction_rate > 0.0) init_layer_channels = [16 * c for c in growth_factor[:num_scales]] step_mode = "even" layers_per_subnets = [base] for i in range(num_feature_blocks - 1): layers_per_subnets.append(step if step_mode == 'even' else step * i + 1) total_layers = sum(layers_per_subnets) interval = math.ceil(total_layers / num_scales) global_layer_ind = 0 channels = [] bottleneck_factors = [] in_channels_tmp = init_layer_channels in_scales = num_scales for i in range(num_feature_blocks): layers_per_subnet = layers_per_subnets[i] scales_i = [] channels_i = [] bottleneck_factors_i = [] for j in range(layers_per_subnet): out_scales = int(num_scales - math.floor(global_layer_ind / interval)) global_layer_ind += 1 scales_i += [out_scales] scale_offset = num_scales - out_scales in_dec_scales = num_scales - len(in_channels_tmp) out_channels = [in_channels_tmp[scale_offset - in_dec_scales + k] + growth * growth_factor[scale_offset + k] for k in range(out_scales)] in_dec_scales = num_scales - len(in_channels_tmp) bottleneck_factors_ij = bottleneck_factor_per_scales[in_dec_scales:][:len(in_channels_tmp)] in_channels_tmp = out_channels channels_i += [out_channels] bottleneck_factors_i += [bottleneck_factors_ij] if in_scales > out_scales: assert (in_channels_tmp[0] % growth_factor[scale_offset] == 0) out_channels1 = int(math.floor(in_channels_tmp[0] / growth_factor[scale_offset] * reduction_rate)) out_channels = [out_channels1 * growth_factor[scale_offset + k] for k in range(out_scales)] in_channels_tmp = out_channels channels_i += [out_channels] bottleneck_factors_i += [[]] in_scales = out_scales in_scales = scales_i[-1] channels += [channels_i] bottleneck_factors += [bottleneck_factors_i] net = CIFAR10MSDNet( channels=channels, init_layer_channels=init_layer_channels, num_feature_blocks=num_feature_blocks, use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def msdnet22_cifar10(**kwargs): """ MSDNet-22 model for CIFAR-10 from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,' https://arxiv.org/abs/1703.09844. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_msdnet_cifar10(blocks=22, model_name="msdnet22_cifar10", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ msdnet22_cifar10, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != msdnet22_cifar10 or weight_count == 4839544) # 5440864 x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 10)) if __name__ == "__main__": _test()
10,172
30.691589
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/erfnet.py
""" ERFNet for image segmentation, implemented in PyTorch. Original paper: 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation,' http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf. """ __all__ = ['ERFNet', 'erfnet_cityscapes', 'FCU'] import os import torch import torch.nn as nn from .common import deconv3x3_block, AsymConvBlock from .enet import ENetMixDownBlock class FCU(nn.Module): """ Factorized convolution unit. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, kernel_size, dilation, dropout_rate, bn_eps): super(FCU, self).__init__() self.use_dropout = (dropout_rate != 0.0) padding1 = (kernel_size - 1) // 2 padding2 = padding1 * dilation self.conv1 = AsymConvBlock( channels=channels, kernel_size=kernel_size, padding=padding1, bias=True, lw_use_bn=False, bn_eps=bn_eps) self.conv2 = AsymConvBlock( channels=channels, kernel_size=kernel_size, padding=padding2, dilation=dilation, bias=True, lw_use_bn=False, bn_eps=bn_eps, rw_activation=None) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) x = x + identity x = self.activ(x) return x class ERFNet(nn.Module): """ ERFNet model from 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation,' http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf. Parameters: ---------- channels : list of int Number of output channels for the first unit of each stage. dilations : list of list of int Dilation values for each unit. dropout_rates : list of float Parameter of dropout layer for each stage. downs : list of int Whether to downscale or upscale in each stage. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, dilations, dropout_rates, downs, correct_size_mismatch=False, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ERFNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = True self.encoder = nn.Sequential() self.decoder = nn.Sequential() enc_idx = 0 dec_idx = 0 for i, out_channels in enumerate(channels): dilations_per_stage = dilations[i] dropout_rates_per_stage = dropout_rates[i] is_down = downs[i] stage = nn.Sequential() for j, dilation in enumerate(dilations_per_stage): if j == 0: if is_down: unit = ENetMixDownBlock( in_channels=in_channels, out_channels=out_channels, bias=bias, bn_eps=bn_eps, correct_size_mismatch=correct_size_mismatch) else: unit = deconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bias=bias, bn_eps=bn_eps) else: unit = FCU( channels=in_channels, kernel_size=3, dilation=dilation, dropout_rate=dropout_rates_per_stage[j], bn_eps=bn_eps) stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels if is_down: enc_idx += 1 self.encoder.add_module("stage{}".format(enc_idx), stage) else: dec_idx += 1 self.decoder.add_module("stage{}".format(dec_idx), stage) self.head = nn.ConvTranspose2d( in_channels=in_channels, out_channels=num_classes, kernel_size=2, stride=2, padding=0, output_padding=0, bias=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.encoder(x) x = self.decoder(x) x = self.head(x) return x def get_erfnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ERFNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ downs = [1, 1, 1, 0, 0] channels = [16, 64, 128, 64, 16] dilations = [[1], [1, 1, 1, 1, 1, 1], [1, 2, 4, 8, 16, 2, 4, 8, 16], [1, 1, 1], [1, 1, 1]] dropout_rates = [[0.0], [0.03, 0.03, 0.03, 0.03, 0.03, 0.03], [0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] net = ERFNet( channels=channels, dilations=dilations, dropout_rates=dropout_rates, downs=downs, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def erfnet_cityscapes(num_classes=19, **kwargs): """ ERFNet model for Cityscapes from 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation,' http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_erfnet(num_classes=num_classes, model_name="erfnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ erfnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != erfnet_cityscapes or weight_count == 2064191) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
9,330
31.175862
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sharesnet.py
""" ShaResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. """ __all__ = ['ShaResNet', 'sharesnet18', 'sharesnet34', 'sharesnet50', 'sharesnet50b', 'sharesnet101', 'sharesnet101b', 'sharesnet152', 'sharesnet152b'] import os from inspect import isfunction import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResInitBlock class ShaConvBlock(nn.Module): """ Shared convolution block with Batch normalization and ReLU/ReLU6 activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. shared_conv : Module, default None Shared convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, activation=(lambda: nn.ReLU(inplace=True)), activate=True, shared_conv=None): super(ShaConvBlock, self).__init__() self.activate = activate if shared_conv is None: self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) else: self.conv = shared_conv self.bn = nn.BatchNorm2d(num_features=out_channels) if self.activate: assert (activation is not None) if isfunction(activation): self.activ = activation() elif isinstance(activation, str): if activation == "relu": self.activ = nn.ReLU(inplace=True) elif activation == "relu6": self.activ = nn.ReLU6(inplace=True) else: raise NotImplementedError() else: self.activ = activation def forward(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def sha_conv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False, activation=(lambda: nn.ReLU(inplace=True)), activate=True, shared_conv=None): """ 3x3 version of the shared convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. shared_conv : Module, default None Shared convolution layer. 
""" return ShaConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, activation=activation, activate=activate, shared_conv=shared_conv) class ShaResBlock(nn.Module): """ Simple ShaResNet block for residual path in ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. shared_conv : Module, default None Shared convolution layer. """ def __init__(self, in_channels, out_channels, stride, shared_conv=None): super(ShaResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride) self.conv2 = sha_conv3x3_block( in_channels=out_channels, out_channels=out_channels, activation=None, activate=False, shared_conv=shared_conv) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class ShaResBottleneck(nn.Module): """ ShaResNet bottleneck block for residual path in ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. shared_conv : Module, default None Shared convolution layer. """ def __init__(self, in_channels, out_channels, stride, conv1_stride=False, bottleneck_factor=4, shared_conv=None): super(ShaResBottleneck, self).__init__() assert (conv1_stride or not ((stride > 1) and (shared_conv is not None))) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = sha_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), shared_conv=shared_conv) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ShaResUnit(nn.Module): """ ShaResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. shared_conv : Module, default None Shared convolution layer. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride, shared_conv=None): super(ShaResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ShaResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride, shared_conv=shared_conv) else: self.body = ShaResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, shared_conv=shared_conv) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ShaResNet(nn.Module): """ ShaResNet model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(ShaResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() shared_conv = None for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 unit = ShaResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride, shared_conv=shared_conv) if (shared_conv is None) and not (bottleneck and not conv1_stride and stride > 1): shared_conv = unit.body.conv2.conv stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sharesnet(blocks, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ShaResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ShaResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ShaResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sharesnet18(**kwargs): """ ShaResNet-18 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=18, model_name="sharesnet18", **kwargs) def sharesnet34(**kwargs): """ ShaResNet-34 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=34, model_name="sharesnet34", **kwargs) def sharesnet50(**kwargs): """ ShaResNet-50 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=50, model_name="sharesnet50", **kwargs) def sharesnet50b(**kwargs): """ ShaResNet-50b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=50, conv1_stride=False, model_name="sharesnet50b", **kwargs) def sharesnet101(**kwargs): """ ShaResNet-101 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=101, model_name="sharesnet101", **kwargs) def sharesnet101b(**kwargs): """ ShaResNet-101b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=101, conv1_stride=False, model_name="sharesnet101b", **kwargs) def sharesnet152(**kwargs): """ ShaResNet-152 model from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=152, model_name="sharesnet152", **kwargs) def sharesnet152b(**kwargs): """ ShaResNet-152b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sharesnet(blocks=152, conv1_stride=False, model_name="sharesnet152b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ sharesnet18, sharesnet34, sharesnet50, sharesnet50b, sharesnet101, sharesnet101b, sharesnet152, sharesnet152b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sharesnet18 or weight_count == 8556072) assert (model != sharesnet34 or weight_count == 13613864) assert (model != sharesnet50 or weight_count == 17373224) assert (model != sharesnet50b or weight_count == 20469800) assert (model != sharesnet101 or weight_count == 26338344) assert (model != sharesnet101b or weight_count == 29434920) assert (model != sharesnet152 or weight_count == 33724456) assert (model != sharesnet152b or weight_count == 36821032) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
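The parameter saving comes from each stage reusing a single 3x3 convolution object; a sketch that verifies the sharing on a built model (attribute paths follow the module names assigned above):

from pytorchcv.models.sharesnet import sharesnet18

net = sharesnet18()
stage = net.features.stage2
# every unit's second 3x3 conv in a stage is literally the same nn.Conv2d instance
convs = [unit.body.conv2.conv for unit in stage]
print(all(c is convs[0] for c in convs))  # True
print(sum(p.numel() for p in net.parameters()))  # 8556072, vs ~11.7M for a plain ResNet-18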
19,841
31.263415
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ibppose_coco.py
""" IBPPose for COCO Keypoint, implemented in PyTorch. Original paper: 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. """ __all__ = ['IbpPose', 'ibppose_coco'] import os import torch from torch import nn from .common import get_activation_layer, conv1x1_block, conv3x3_block, conv7x7_block, SEBlock, Hourglass,\ InterpolationBlock class IbpResBottleneck(nn.Module): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, bias=False, bottleneck_factor=2, activation=(lambda: nn.ReLU(inplace=True))): super(IbpResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, activation=activation) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bias=bias, activation=activation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bias=bias, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IbpResUnit(nn.Module): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride=1, bias=False, bottleneck_factor=2, activation=(lambda: nn.ReLU(inplace=True))): super(IbpResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = IbpResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, bottleneck_factor=bottleneck_factor, activation=activation) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, activation=None) self.activ = get_activation_layer(activation) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class IbpBackbone(nn.Module): """ IBPPose backbone. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, activation): super(IbpBackbone, self).__init__() dilations = (3, 3, 4, 4, 5, 5) mid1_channels = out_channels // 4 mid2_channels = out_channels // 2 self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid1_channels, stride=2, activation=activation) self.res1 = IbpResUnit( in_channels=mid1_channels, out_channels=mid2_channels, activation=activation) self.pool = nn.MaxPool2d( kernel_size=2, stride=2) self.res2 = IbpResUnit( in_channels=mid2_channels, out_channels=mid2_channels, activation=activation) self.dilation_branch = nn.Sequential() for i, dilation in enumerate(dilations): self.dilation_branch.add_module("block{}".format(i + 1), conv3x3_block( in_channels=mid2_channels, out_channels=mid2_channels, padding=dilation, dilation=dilation, activation=activation)) def forward(self, x): x = self.conv1(x) x = self.res1(x) x = self.pool(x) x = self.res2(x) y = self.dilation_branch(x) x = torch.cat((x, y), dim=1) return x class IbpDownBlock(nn.Module): """ IBPPose down block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, activation): super(IbpDownBlock, self).__init__() self.down = nn.MaxPool2d( kernel_size=2, stride=2) self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation) def forward(self, x): x = self.down(x) x = self.res(x) return x class IbpUpBlock(nn.Module): """ IBPPose up block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, use_bn, activation): super(IbpUpBlock, self).__init__() self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation) self.up = InterpolationBlock( scale_factor=2, mode="nearest", align_corners=None) self.conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=(not use_bn), use_bn=use_bn, activation=activation) def forward(self, x): x = self.res(x) x = self.up(x) x = self.conv(x) return x class MergeBlock(nn.Module): """ IBPPose merge block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, use_bn): super(MergeBlock, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=(not use_bn), use_bn=use_bn, activation=None) def forward(self, x): return self.conv(x) class IbpPreBlock(nn.Module): """ IBPPose preliminary decoder block. Parameters: ---------- out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. 
""" def __init__(self, out_channels, use_bn, activation): super(IbpPreBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=(not use_bn), use_bn=use_bn, activation=activation) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=(not use_bn), use_bn=use_bn, activation=activation) self.se = SEBlock( channels=out_channels, use_conv=False, mid_activation=activation) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.se(x) return x class IbpPass(nn.Module): """ IBPPose single pass decoder block. Parameters: ---------- channels : int Number of input/output channels. mid_channels : int Number of middle channels. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, channels, mid_channels, depth, growth_rate, merge, use_bn, activation): super(IbpPass, self).__init__() self.merge = merge down_seq = nn.Sequential() up_seq = nn.Sequential() skip_seq = nn.Sequential() top_channels = channels bottom_channels = channels for i in range(depth + 1): skip_seq.add_module("skip{}".format(i + 1), IbpResUnit( in_channels=top_channels, out_channels=top_channels, activation=activation)) bottom_channels += growth_rate if i < depth: down_seq.add_module("down{}".format(i + 1), IbpDownBlock( in_channels=top_channels, out_channels=bottom_channels, activation=activation)) up_seq.add_module("up{}".format(i + 1), IbpUpBlock( in_channels=bottom_channels, out_channels=top_channels, use_bn=use_bn, activation=activation)) top_channels = bottom_channels self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, return_first_skip=False) self.pre_block = IbpPreBlock( out_channels=channels, use_bn=use_bn, activation=activation) self.post_block = conv1x1_block( in_channels=channels, out_channels=mid_channels, bias=True, use_bn=False, activation=None) if self.merge: self.pre_merge_block = MergeBlock( in_channels=channels, out_channels=channels, use_bn=use_bn) self.post_merge_block = MergeBlock( in_channels=mid_channels, out_channels=channels, use_bn=use_bn) def forward(self, x, x_prev): x = self.hg(x) if x_prev is not None: x = x + x_prev y = self.pre_block(x) z = self.post_block(y) if self.merge: z = self.post_merge_block(z) + self.pre_merge_block(y) return z class IbpPose(nn.Module): """ IBPPose model from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- passes : int Number of passes. backbone_out_channels : int Number of output channels for the backbone. outs_channels : int Number of output channels for the backbone. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. 
""" def __init__(self, passes, backbone_out_channels, outs_channels, depth, growth_rate, use_bn, in_channels=3, in_size=(256, 256)): super(IbpPose, self).__init__() self.in_size = in_size activation = (lambda: nn.LeakyReLU(inplace=True)) self.backbone = IbpBackbone( in_channels=in_channels, out_channels=backbone_out_channels, activation=activation) self.decoder = nn.Sequential() for i in range(passes): merge = (i != passes - 1) self.decoder.add_module("pass{}".format(i + 1), IbpPass( channels=backbone_out_channels, mid_channels=outs_channels, depth=depth, growth_rate=growth_rate, merge=merge, use_bn=use_bn, activation=activation)) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0, 0.001) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): torch.nn.init.normal_(m.weight.data, 0, 0.01) m.bias.data.zero_() def forward(self, x): x = self.backbone(x) x_prev = None for module in self.decoder._modules.values(): if x_prev is not None: x = x + x_prev x_prev = module(x, x_prev) return x_prev def get_ibppose(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IBPPose model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ passes = 4 backbone_out_channels = 256 outs_channels = 50 depth = 4 growth_rate = 128 use_bn = True net = IbpPose( passes=passes, backbone_out_channels=backbone_out_channels, outs_channels=outs_channels, depth=depth, growth_rate=growth_rate, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ibppose_coco(**kwargs): """ IBPPose model for COCO Keypoint from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibppose(model_name="ibppose_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (256, 256) pretrained = False models = [ ibppose_coco, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibppose_coco or weight_count == 95827784) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == 50)) assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) if __name__ == "__main__": _test()
17,476
28.521959
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/xception.py
""" Xception for ImageNet-1K, implemented in PyTorch. Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. """ __all__ = ['Xception', 'xception'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block class DwsConv(nn.Module): """ Depthwise separable convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0): super(DwsConv, self).__init__() self.dw_conv = nn.Conv2d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels, bias=False) self.pw_conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=False) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class DwsConvBlock(nn.Module): """ Depthwise separable convolution block with batchnorm and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. activate : bool Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activate): super(DwsConvBlock, self).__init__() self.activate = activate if self.activate: self.activ = nn.ReLU(inplace=False) self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(num_features=out_channels) def forward(self, x): if self.activate: x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def dws_conv3x3_block(in_channels, out_channels, activate): """ 3x3 version of the depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool Whether activate the convolution block. """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, activate=activate) class XceptionUnit(nn.Module): """ Xception unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the downsample polling. reps : int Number of repetitions. start_with_relu : bool, default True Whether start with ReLU activation. grow_first : bool, default True Whether start from growing. 
""" def __init__(self, in_channels, out_channels, stride, reps, start_with_relu=True, grow_first=True): super(XceptionUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.body = nn.Sequential() for i in range(reps): if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)): in_channels_i = in_channels out_channels_i = out_channels else: if grow_first: in_channels_i = out_channels out_channels_i = out_channels else: in_channels_i = in_channels out_channels_i = in_channels activate = start_with_relu if (i == 0) else True self.body.add_module("block{}".format(i + 1), dws_conv3x3_block( in_channels=in_channels_i, out_channels=out_channels_i, activate=activate)) if stride != 1: self.body.add_module("pool", nn.MaxPool2d( kernel_size=3, stride=stride, padding=1)) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity return x class XceptionInitBlock(nn.Module): """ Xception specific initial block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(XceptionInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0) self.conv2 = conv3x3_block( in_channels=32, out_channels=64, stride=1, padding=0) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class XceptionFinalBlock(nn.Module): """ Xception specific final block. """ def __init__(self): super(XceptionFinalBlock, self).__init__() self.conv1 = dws_conv3x3_block( in_channels=1024, out_channels=1536, activate=False) self.conv2 = dws_conv3x3_block( in_channels=1536, out_channels=2048, activate=True) self.activ = nn.ReLU(inplace=True) self.pool = nn.AvgPool2d( kernel_size=10, stride=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.activ(x) x = self.pool(x) return x class Xception(nn.Module): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- channels : list of list of int Number of output channels for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, in_channels=3, in_size=(299, 299), num_classes=1000): super(Xception, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", XceptionInitBlock( in_channels=in_channels)) in_channels = 64 for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), XceptionUnit( in_channels=in_channels, out_channels=out_channels, stride=(2 if (j == 0) else 1), reps=(2 if (j == 0) else 3), start_with_relu=((i != 0) or (j != 0)), grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1)))) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", XceptionFinalBlock()) self.output = nn.Linear( in_features=2048, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_xception(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Xception model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[128], [256], [728] * 9, [1024]] net = Xception( channels=channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def xception(**kwargs): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xception(model_name="xception", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ xception, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != xception or weight_count == 22855952) x = torch.randn(1, 3, 299, 299) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
11,572
27.717122
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/darknet53.py
""" DarkNet-53 for ImageNet-1K, implemented in PyTorch. Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. """ __all__ = ['DarkNet53', 'darknet53'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block class DarkUnit(nn.Module): """ DarkNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. alpha : float Slope coefficient for Leaky ReLU activation. """ def __init__(self, in_channels, out_channels, alpha): super(DarkUnit, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True)) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) return x + identity class DarkNet53(nn.Module): """ DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. alpha : float, default 0.1 Slope coefficient for Leaky ReLU activation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, alpha=0.1, in_channels=3, in_size=(224, 224), num_classes=1000): super(DarkNet53, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True))) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if j == 0: stage.add_module("unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True))) else: stage.add_module("unit{}".format(j + 1), DarkUnit( in_channels=in_channels, out_channels=out_channels, alpha=alpha)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_darknet53(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DarkNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" init_block_channels = 32 layers = [2, 3, 9, 9, 5] channels_per_layers = [64, 128, 256, 512, 1024] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = DarkNet53( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def darknet53(**kwargs): """ DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_darknet53(model_name="darknet53", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ darknet53, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != darknet53 or weight_count == 41609928) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
6,707
29.080717
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mobilenet.py
""" MobileNet for ImageNet-1K, implemented in PyTorch. Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. """ __all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'get_mobilenet'] import os import torch.nn as nn from .common import conv3x3_block, dwsconv3x3_block class MobileNet(nn.Module): """ MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- channels : list of list of int Number of output channels for each unit. first_stage_stride : bool Whether stride is used at the first stage. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, first_stage_stride, dw_use_bn=True, dw_activation=(lambda: nn.ReLU(inplace=True)), in_channels=3, in_size=(224, 224), num_classes=1000): super(MobileNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() init_block_channels = channels[0][0] self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels[1:]): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1 stage.add_module("unit{}".format(j + 1), dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, dw_use_bn=dw_use_bn, dw_activation=dw_activation)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if 'dw_conv.conv' in name: nn.init.kaiming_normal_(module.weight, mode='fan_in') elif name == 'init_block.conv' or 'pw_conv.conv' in name: nn.init.kaiming_normal_(module.weight, mode='fan_out') elif 'bn' in name: nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) elif 'output' in name: nn.init.kaiming_normal_(module.weight, mode='fan_out') nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_mobilenet(width_scale, dws_simplified=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MobileNet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. dws_simplified : bool, default False Whether to use simplified depthwise separable convolution block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]] first_stage_stride = False if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if dws_simplified: dw_use_bn = False dw_activation = None else: dw_use_bn = True dw_activation = (lambda: nn.ReLU(inplace=True)) net = MobileNet( channels=channels, first_stage_stride=first_stage_stride, dw_use_bn=dw_use_bn, dw_activation=dw_activation, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mobilenet_w1(**kwargs): """ 1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=1.0, model_name="mobilenet_w1", **kwargs) def mobilenet_w3d4(**kwargs): """ 0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.75, model_name="mobilenet_w3d4", **kwargs) def mobilenet_wd2(**kwargs): """ 0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.5, model_name="mobilenet_wd2", **kwargs) def mobilenet_wd4(**kwargs): """ 0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.25, model_name="mobilenet_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mobilenet_w1, mobilenet_w3d4, mobilenet_wd2, mobilenet_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenet_w1 or weight_count == 4231976) assert (model != mobilenet_w3d4 or weight_count == 2585560) assert (model != mobilenet_wd2 or weight_count == 1331592) assert (model != mobilenet_wd4 or weight_count == 470072) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
8,480
32.521739
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/dpn.py
""" DPN for ImageNet-1K, implemented in PyTorch. Original paper: 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. """ __all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn98', 'dpn107', 'dpn131'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, DualPathSequential class GlobalAvgMaxPool2D(nn.Module): """ Global average+max pooling operation for spatial data. Parameters: ---------- output_size : int, default 1 The target output size. """ def __init__(self, output_size=1): super(GlobalAvgMaxPool2D, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(output_size=output_size) self.max_pool = nn.AdaptiveMaxPool2d(output_size=output_size) def forward(self, x): x_avg = self.avg_pool(x) x_max = self.max_pool(x) x = 0.5 * (x_avg + x_max) return x def dpn_batch_norm(channels): """ DPN specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. """ return nn.BatchNorm2d( num_features=channels, eps=0.001) class PreActivation(nn.Module): """ DPN specific block, which performs the preactivation like in RreResNet. Parameters: ---------- channels : int Number of channels. """ def __init__(self, channels): super(PreActivation, self).__init__() self.bn = dpn_batch_norm(channels=channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class DPNConv(nn.Module): """ DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups): super(DPNConv, self).__init__() self.bn = dpn_batch_norm(channels=in_channels) self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False) def forward(self, x): x = self.bn(x) x = self.activ(x) x = self.conv(x) return x def dpn_conv1x1(in_channels, out_channels, stride=1): """ 1x1 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. """ return DPNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=1) def dpn_conv3x3(in_channels, out_channels, stride, groups): """ 3x3 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. groups : int Number of groups. """ return DPNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) class DPNUnit(nn.Module): """ DPN unit. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of intermediate channels. bw : int Number of residual channels. inc : int Incrementing step for channels. groups : int Number of groups in the units. has_proj : bool Whether to use projection. key_stride : int Key strides of the convolutions. b_case : bool, default False Whether to use B-case model. 
""" def __init__(self, in_channels, mid_channels, bw, inc, groups, has_proj, key_stride, b_case=False): super(DPNUnit, self).__init__() self.bw = bw self.has_proj = has_proj self.b_case = b_case if self.has_proj: self.conv_proj = dpn_conv1x1( in_channels=in_channels, out_channels=bw + 2 * inc, stride=key_stride) self.conv1 = dpn_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.conv2 = dpn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=key_stride, groups=groups) if b_case: self.preactiv = PreActivation(channels=mid_channels) self.conv3a = conv1x1( in_channels=mid_channels, out_channels=bw) self.conv3b = conv1x1( in_channels=mid_channels, out_channels=inc) else: self.conv3 = dpn_conv1x1( in_channels=mid_channels, out_channels=bw + inc) def forward(self, x1, x2=None): x_in = torch.cat((x1, x2), dim=1) if x2 is not None else x1 if self.has_proj: x_s = self.conv_proj(x_in) x_s1 = x_s[:, :self.bw, :, :] x_s2 = x_s[:, self.bw:, :, :] else: assert (x2 is not None) x_s1 = x1 x_s2 = x2 x_in = self.conv1(x_in) x_in = self.conv2(x_in) if self.b_case: x_in = self.preactiv(x_in) y1 = self.conv3a(x_in) y2 = self.conv3b(x_in) else: x_in = self.conv3(x_in) y1 = x_in[:, :self.bw, :, :] y2 = x_in[:, self.bw:, :, :] residual = x_s1 + y1 dense = torch.cat((x_s2, y2), dim=1) return residual, dense class DPNInitBlock(nn.Module): """ DPN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, padding): super(DPNInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=False) self.bn = dpn_batch_norm(channels=out_channels) self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class DPNFinalBlock(nn.Module): """ DPN final block, which performs the preactivation with cutting. Parameters: ---------- channels : int Number of channels. """ def __init__(self, channels): super(DPNFinalBlock, self).__init__() self.activ = PreActivation(channels=channels) def forward(self, x1, x2): assert (x2 is not None) x = torch.cat((x1, x2), dim=1) x = self.activ(x) return x, None class DPN(nn.Module): """ DPN model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_block_kernel_size : int or tuple/list of 2 int Convolution window size for the initial unit. init_block_padding : int or tuple/list of 2 int Padding value for convolution layer in the initial unit. rs : list f int Number of intermediate channels for each unit. bws : list f int Number of residual channels for each unit. incs : list f int Incrementing step for channels for each unit. groups : int Number of groups in the units. b_case : bool Whether to use B-case model. for_training : bool Whether to use model for training. test_time_pool : bool Whether to use the avg-max pooling in the inference mode. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, init_block_kernel_size, init_block_padding, rs, bws, incs, groups, b_case, for_training, test_time_pool, in_channels=3, in_size=(224, 224), num_classes=1000): super(DPN, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) self.features.add_module("init_block", DPNInitBlock( in_channels=in_channels, out_channels=init_block_channels, kernel_size=init_block_kernel_size, padding=init_block_padding)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() r = rs[i] bw = bws[i] inc = incs[i] for j, out_channels in enumerate(channels_per_stage): has_proj = (j == 0) key_stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), DPNUnit( in_channels=in_channels, mid_channels=r, bw=bw, inc=inc, groups=groups, has_proj=has_proj, key_stride=key_stride, b_case=b_case)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", DPNFinalBlock(channels=in_channels)) self.output = nn.Sequential() if for_training or not test_time_pool: self.output.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output.add_module("classifier", conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True)) else: self.output.add_module("avg_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output.add_module("classifier", conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True)) self.output.add_module("avgmax_pool", GlobalAvgMaxPool2D()) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_dpn(num_layers, b_case=False, for_training=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DPN model with specific parameters. Parameters: ---------- num_layers : int Number of layers. b_case : bool, default False Whether to use B-case model. for_training : bool Whether to use model for training. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if num_layers == 68: init_block_channels = 10 init_block_kernel_size = 3 init_block_padding = 1 bw_factor = 1 k_r = 128 groups = 32 k_sec = (3, 4, 12, 3) incs = (16, 32, 32, 64) test_time_pool = True elif num_layers == 98: init_block_channels = 96 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (3, 6, 20, 3) incs = (16, 32, 32, 128) test_time_pool = True elif num_layers == 107: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 200 groups = 50 k_sec = (4, 8, 20, 3) incs = (20, 64, 64, 128) test_time_pool = True elif num_layers == 131: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (4, 8, 28, 3) incs = (16, 32, 32, 128) test_time_pool = True else: raise ValueError("Unsupported DPN version with number of layers {}".format(num_layers)) channels = [[0] * li for li in k_sec] rs = [0 * li for li in k_sec] bws = [0 * li for li in k_sec] for i in range(len(k_sec)): rs[i] = (2 ** i) * k_r bws[i] = (2 ** i) * 64 * bw_factor inc = incs[i] channels[i][0] = bws[i] + 3 * inc for j in range(1, k_sec[i]): channels[i][j] = channels[i][j - 1] + inc net = DPN( channels=channels, init_block_channels=init_block_channels, init_block_kernel_size=init_block_kernel_size, init_block_padding=init_block_padding, rs=rs, bws=bws, incs=incs, groups=groups, b_case=b_case, for_training=for_training, test_time_pool=test_time_pool, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def dpn68(**kwargs): """ DPN-68 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=False, model_name="dpn68", **kwargs) def dpn68b(**kwargs): """ DPN-68b model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=True, model_name="dpn68b", **kwargs) def dpn98(**kwargs): """ DPN-98 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dpn(num_layers=98, b_case=False, model_name="dpn98", **kwargs) def dpn107(**kwargs): """ DPN-107 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dpn(num_layers=107, b_case=False, model_name="dpn107", **kwargs) def dpn131(**kwargs): """ DPN-131 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_dpn(num_layers=131, b_case=False, model_name="dpn131", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False for_training = False models = [ dpn68, # dpn68b, dpn98, # dpn107, dpn131, ] for model in models: net = model(pretrained=pretrained, for_training=for_training) net.train() # net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dpn68 or weight_count == 12611602) assert (model != dpn68b or weight_count == 12611602) assert (model != dpn98 or weight_count == 61570728) assert (model != dpn107 or weight_count == 86917800) assert (model != dpn131 or weight_count == 79254504) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
18,976
27.709531
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sknet.py
""" SKNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. """ __all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent from .resnet import ResInitBlock class SKConvBlock(nn.Module): """ SKNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. groups : int, default 32 Number of groups in branches. num_branches : int, default 2 Number of branches (`M` parameter in the paper). reduction : int, default 16 Reduction value for intermediate channels (`r` parameter in the paper). min_channels : int, default 32 Minimal number of intermediate channels (`L` parameter in the paper). """ def __init__(self, in_channels, out_channels, stride, groups=32, num_branches=2, reduction=16, min_channels=32): super(SKConvBlock, self).__init__() self.num_branches = num_branches self.out_channels = out_channels mid_channels = max(in_channels // reduction, min_channels) self.branches = Concurrent(stack=True) for i in range(num_branches): dilation = 1 + i self.branches.add_module("branch{}".format(i + 2), conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=dilation, dilation=dilation, groups=groups)) self.pool = nn.AdaptiveAvgPool2d(output_size=1) self.fc1 = conv1x1_block( in_channels=out_channels, out_channels=mid_channels) self.fc2 = conv1x1( in_channels=mid_channels, out_channels=(out_channels * num_branches)) self.softmax = nn.Softmax(dim=1) def forward(self, x): y = self.branches(x) u = y.sum(dim=1) s = self.pool(u) z = self.fc1(s) w = self.fc2(z) batch = w.size(0) w = w.view(batch, self.num_branches, self.out_channels) w = self.softmax(w) w = w.unsqueeze(-1).unsqueeze(-1) y = y * w y = y.sum(dim=1) return y class SKNetBottleneck(nn.Module): """ SKNet bottleneck block for residual path in SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 2 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, bottleneck_factor=2): super(SKNetBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = SKConvBlock( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class SKNetUnit(nn.Module): """ SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. 
""" def __init__(self, in_channels, out_channels, stride): super(SKNetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = SKNetBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class SKNet(nn.Module): """ SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(SKNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SKNetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sknet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SKNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SKNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sknet50(**kwargs): """ SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sknet(blocks=50, model_name="sknet50", **kwargs) def sknet101(**kwargs): """ SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sknet(blocks=101, model_name="sknet101", **kwargs) def sknet152(**kwargs): """ SKNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sknet(blocks=152, model_name="sknet152", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ sknet50, sknet101, sknet152, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sknet50 or weight_count == 27479784) assert (model != sknet101 or weight_count == 48736040) assert (model != sknet152 or weight_count == 66295656) x = torch.randn(14, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, 1000)) if __name__ == "__main__": _test()
10,908
28.563686
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/spnasnet.py
""" Single-Path NASNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. """ __all__ = ['SPNASNet', 'spnasnet'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block class SPNASUnit(nn.Module): """ Single-Path NASNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int Expansion factor for each unit. use_skip : bool, default True Whether to use skip connection. activation : str, default 'relu' Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, use_kernel3, exp_factor, use_skip=True, activation="relu"): super(SPNASUnit, self).__init__() assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (stride == 1) and use_skip self.use_exp_conv = exp_factor > 1 mid_channels = exp_factor * in_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) x = self.conv2(x) if self.residual: x = x + identity return x class SPNASInitBlock(nn.Module): """ Single-Path NASNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(SPNASInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = SPNASUnit( in_channels=mid_channels, out_channels=out_channels, stride=1, use_kernel3=True, exp_factor=1, use_skip=False) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class SPNASFinalBlock(nn.Module): """ Single-Path NASNet specific final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(SPNASFinalBlock, self).__init__() self.conv1 = SPNASUnit( in_channels=in_channels, out_channels=mid_channels, stride=1, use_kernel3=True, exp_factor=6, use_skip=False) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class SPNASNet(nn.Module): """ Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : list of 2 int Number of output channels for the initial unit. 
final_block_channels : list of 2 int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, in_channels=3, in_size=(224, 224), num_classes=1000): super(SPNASNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SPNASInitBlock( in_channels=in_channels, out_channels=init_block_channels[1], mid_channels=init_block_channels[0])) in_channels = init_block_channels[1] for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if ((j == 0) and (i != 3)) or ((j == len(channels_per_stage) // 2) and (i == 3)) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] stage.add_module("unit{}".format(j + 1), SPNASUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", SPNASFinalBlock( in_channels=in_channels, out_channels=final_block_channels[1], mid_channels=final_block_channels[0])) in_channels = final_block_channels[1] self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_spnasnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Single-Path NASNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80], [96, 96, 96, 96, 192, 192, 192, 192]] kernels3 = [[1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]] exp_factors = [[3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 6, 6, 6]] net = SPNASNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def spnasnet(**kwargs): """ Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_spnasnet(model_name="spnasnet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ spnasnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != spnasnet or weight_count == 4421616) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
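The stride rule in SPNASNet.__init__ is easy to misread: stage 4 downsamples in its middle, where the 96-channel units switch to 192, rather than at its first unit. This hedged sketch enumerates where the rule places stride-2 units, using the channel table from get_spnasnet above.

channels = [[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80],
            [96, 96, 96, 96, 192, 192, 192, 192]]
for i, stage in enumerate(channels):
    for j, _ in enumerate(stage):
        stride = 2 if ((j == 0) and (i != 3)) or ((j == len(stage) // 2) and (i == 3)) else 1
        if stride == 2:
            print("stage{} unit{}".format(i + 1, j + 1))
# stage1 unit1, stage2 unit1, stage3 unit1, stage4 unit5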
10,388
30.10479
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fastscnn.py
""" Fast-SCNN for image segmentation, implemented in PyTorch. Original paper: 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. """ __all__ = ['FastSCNN', 'fastscnn_cityscapes'] import os import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwsconv3x3_block, Concurrent,\ InterpolationBlock, Identity class Stem(nn.Module): """ Fast-SCNN specific stem block. Parameters: ---------- in_channels : int Number of input channels. channels : tuple/list of 3 int Number of output channels. """ def __init__(self, in_channels, channels): super(Stem, self).__init__() assert (len(channels) == 3) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=channels[0], stride=2, padding=0) self.conv2 = dwsconv3x3_block( in_channels=channels[0], out_channels=channels[1], stride=2) self.conv3 = dwsconv3x3_block( in_channels=channels[1], out_channels=channels[2], stride=2) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class LinearBottleneck(nn.Module): """ Fast-SCNN specific Linear Bottleneck layer from MobileNetV2. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. """ def __init__(self, in_channels, out_channels, stride): super(LinearBottleneck, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) mid_channels = in_channels * 6 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): if self.residual: identity = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.residual: x = x + identity return x class FeatureExtractor(nn.Module): """ Fast-SCNN specific feature extractor/encoder. Parameters: ---------- in_channels : int Number of input channels. channels : list of list of int Number of output channels for each unit. """ def __init__(self, in_channels, channels): super(FeatureExtractor, self).__init__() self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != len(channels) - 1) else 1 stage.add_module("unit{}".format(j + 1), LinearBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) def forward(self, x): x = self.features(x) return x class PoolingBranch(nn.Module): """ Fast-SCNN specific pooling branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of 2 int or None Spatial size of input image. down_size : int Spatial size of downscaled image. 
""" def __init__(self, in_channels, out_channels, in_size, down_size): super(PoolingBranch, self).__init__() self.in_size = in_size self.pool = nn.AdaptiveAvgPool2d(output_size=down_size) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.up = InterpolationBlock( scale_factor=None, out_size=in_size) def forward(self, x): in_size = self.in_size if self.in_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = self.up(x, in_size) return x class FastPyramidPooling(nn.Module): """ Fast-SCNN specific fast pyramid pooling block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of 2 int or None Spatial size of input image. """ def __init__(self, in_channels, out_channels, in_size): super(FastPyramidPooling, self).__init__() down_sizes = [1, 2, 3, 6] mid_channels = in_channels // 4 self.branches = Concurrent() self.branches.add_module("branch1", Identity()) for i, down_size in enumerate(down_sizes): self.branches.add_module("branch{}".format(i + 2), PoolingBranch( in_channels=in_channels, out_channels=mid_channels, in_size=in_size, down_size=down_size)) self.conv = conv1x1_block( in_channels=(in_channels * 2), out_channels=out_channels) def forward(self, x): x = self.branches(x) x = self.conv(x) return x class FeatureFusion(nn.Module): """ Fast-SCNN specific feature fusion block. Parameters: ---------- x_in_channels : int Number of high resolution (x) input channels. y_in_channels : int Number of low resolution (y) input channels. out_channels : int Number of output channels. x_in_size : tuple of 2 int or None Spatial size of high resolution (x) input image. """ def __init__(self, x_in_channels, y_in_channels, out_channels, x_in_size): super(FeatureFusion, self).__init__() self.x_in_size = x_in_size self.up = InterpolationBlock( scale_factor=None, out_size=x_in_size) self.low_dw_conv = dwconv3x3_block( in_channels=y_in_channels, out_channels=out_channels) self.low_pw_conv = conv1x1_block( in_channels=out_channels, out_channels=out_channels, bias=True, activation=None) self.high_conv = conv1x1_block( in_channels=x_in_channels, out_channels=out_channels, bias=True, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x, y): x_in_size = self.x_in_size if self.x_in_size is not None else x.shape[2:] y = self.up(y, x_in_size) y = self.low_dw_conv(y) y = self.low_pw_conv(y) x = self.high_conv(x) out = x + y return self.activ(out) class Head(nn.Module): """ Fast-SCNN head (classifier) block. Parameters: ---------- in_channels : int Number of input channels. num_classes : int Number of classification classes. """ def __init__(self, in_channels, num_classes): super(Head, self).__init__() self.conv1 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels) self.conv2 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv3 = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.dropout(x) x = self.conv3(x) return x class AuxHead(nn.Module): """ Fast-SCNN auxiliary (after stem) head (classifier) block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. num_classes : int Number of classification classes. 
""" def __init__(self, in_channels, mid_channels, num_classes): super(AuxHead, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=num_classes, bias=True) def forward(self, x): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) return x class FastSCNN(nn.Module): """ Fast-SCNN from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. Parameters: ---------- aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 1024) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, aux=False, fixed_size=True, in_channels=3, in_size=(1024, 1024), num_classes=19): super(FastSCNN, self).__init__() assert (in_channels > 0) assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size steam_channels = [32, 48, 64] self.stem = Stem( in_channels=in_channels, channels=steam_channels) in_channels = steam_channels[-1] feature_channels = [[64, 64, 64], [96, 96, 96], [128, 128, 128]] self.features = FeatureExtractor( in_channels=in_channels, channels=feature_channels) pool_out_size = (in_size[0] // 32, in_size[1] // 32) if fixed_size else None self.pool = FastPyramidPooling( in_channels=feature_channels[-1][-1], out_channels=feature_channels[-1][-1], in_size=pool_out_size) fusion_out_size = (in_size[0] // 8, in_size[1] // 8) if fixed_size else None fusion_out_channels = 128 self.fusion = FeatureFusion( x_in_channels=steam_channels[-1], y_in_channels=feature_channels[-1][-1], out_channels=fusion_out_channels, x_in_size=fusion_out_size) self.head = Head( in_channels=fusion_out_channels, num_classes=num_classes) self.up = InterpolationBlock( scale_factor=None, out_size=in_size) if self.aux: self.aux_head = AuxHead( in_channels=64, mid_channels=64, num_classes=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x = self.stem(x) y = self.features(x) y = self.pool(y) y = self.fusion(x, y) y = self.head(y) y = self.up(y, in_size) if self.aux: x = self.aux_head(x) x = self.up(x, in_size) return y, x return y def get_fastscnn(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Fast-SCNN model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" net = FastSCNN( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fastscnn_cityscapes(num_classes=19, aux=True, **kwargs): """ Fast-SCNN model for Cityscapes from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fastscnn(num_classes=num_classes, aux=aux, model_name="fastscnn_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (1024, 2048) aux = True fixed_size = False pretrained = False models = [ (fastscnn_cityscapes, 19), ] for model, num_classes in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != fastscnn_cityscapes or weight_count == 1176278) else: assert (model != fastscnn_cityscapes or weight_count == 1138051) x = torch.randn(1, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3))) if __name__ == "__main__": _test()
15,264
28.814453
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/esnet.py
""" ESNet for image segmentation, implemented in PyTorch. Original paper: 'ESNet: An Efficient Symmetric Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1906.09826. """ __all__ = ['ESNet', 'esnet_cityscapes'] import os import torch import torch.nn as nn from .common import AsymConvBlock, deconv3x3_block, Concurrent from .enet import ENetMixDownBlock from .erfnet import FCU class PFCUBranch(nn.Module): """ Parallel factorized convolution unit's branch. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, kernel_size, dilation, dropout_rate, bn_eps): super(PFCUBranch, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.conv = AsymConvBlock( channels=channels, kernel_size=kernel_size, padding=dilation, dilation=dilation, bias=True, lw_use_bn=False, bn_eps=bn_eps, rw_activation=None) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): x = self.conv(x) if self.use_dropout: x = self.dropout(x) return x class PFCU(nn.Module): """ Parallel factorized convolution unit. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, kernel_size, dropout_rate, bn_eps): super(PFCU, self).__init__() dilations = [2, 5, 9] padding = (kernel_size - 1) // 2 self.conv1 = AsymConvBlock( channels=channels, kernel_size=kernel_size, padding=padding, bias=True, lw_use_bn=False, bn_eps=bn_eps) self.branches = Concurrent(merge_type="sum") for i, dilation in enumerate(dilations): self.branches.add_module("branch{}".format(i + 1), PFCUBranch( channels=channels, kernel_size=kernel_size, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps)) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.branches(x) x = x + identity x = self.activ(x) return x class ESNet(nn.Module): """ ESNet model from 'ESNet: An Efficient Symmetric Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1906.09826. Parameters: ---------- layers : list of list of int Number of layers in each stage of encoder and decoder. channels : list of list of int Number of output channels for each in encoder and decoder. kernel_sizes : list of list of int Kernel size for each in encoder and decoder. dropout_rates : list of list of int Dropout rates for each unit in encoder and decoder. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, layers, channels, kernel_sizes, dropout_rates, correct_size_mismatch=False, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ESNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.encoder = nn.Sequential() for i, layers_per_stage in enumerate(layers[0]): out_channels = channels[0][i] kernel_size = kernel_sizes[0][i] dropout_rate = dropout_rates[0][i] stage = nn.Sequential() for j in range(layers_per_stage): if j == 0: stage.add_module("unit{}".format(j + 1), ENetMixDownBlock( in_channels=in_channels, out_channels=out_channels, bias=True, bn_eps=bn_eps, correct_size_mismatch=correct_size_mismatch)) in_channels = out_channels elif i != len(layers[0]) - 1: stage.add_module("unit{}".format(j + 1), FCU( channels=in_channels, kernel_size=kernel_size, dilation=1, dropout_rate=dropout_rate, bn_eps=bn_eps)) else: stage.add_module("unit{}".format(j + 1), PFCU( channels=in_channels, kernel_size=kernel_size, dropout_rate=dropout_rate, bn_eps=bn_eps)) self.encoder.add_module("stage{}".format(i + 1), stage) self.decoder = nn.Sequential() for i, layers_per_stage in enumerate(layers[1]): out_channels = channels[1][i] kernel_size = kernel_sizes[1][i] stage = nn.Sequential() for j in range(layers_per_stage): if j == 0: stage.add_module("unit{}".format(j + 1), deconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bias=True, bn_eps=bn_eps)) in_channels = out_channels else: stage.add_module("unit{}".format(j + 1), FCU( channels=in_channels, kernel_size=kernel_size, dilation=1, dropout_rate=0, bn_eps=bn_eps)) self.decoder.add_module("stage{}".format(i + 1), stage) self.head = nn.ConvTranspose2d( in_channels=in_channels, out_channels=num_classes, kernel_size=2, stride=2, padding=0, output_padding=0, bias=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.encoder(x) x = self.decoder(x) x = self.head(x) return x def get_esnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ESNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ layers = [[4, 3, 4], [3, 3]] channels = [[16, 64, 128], [64, 16]] kernel_sizes = [[3, 5, 3], [5, 3]] dropout_rates = [[0.03, 0.03, 0.3], [0, 0]] bn_eps = 1e-3 net = ESNet( layers=layers, channels=channels, kernel_sizes=kernel_sizes, dropout_rates=dropout_rates, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def esnet_cityscapes(num_classes=19, **kwargs): """ ESNet model for Cityscapes from 'ESNet: An Efficient Symmetric Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1906.09826. 
Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_esnet(num_classes=num_classes, model_name="esnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True correct_size_mismatch = False in_size = (1024, 2048) classes = 19 models = [ esnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, correct_size_mismatch=correct_size_mismatch) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != esnet_cityscapes or weight_count == 1660607) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
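# --- Editor's illustrative sketch (not part of the original file) ---
# The PFCU above runs three asymmetric convolution branches with dilations
# 2, 5 and 9 in parallel and sums them before the residual addition, so it
# preserves both the channel count and the spatial size. A minimal check;
# the 16-channel, 64x64 input is an arbitrary assumption:
def _pfcu_sketch():
    import torch
    block = PFCU(channels=16, kernel_size=3, dropout_rate=0.0, bn_eps=1e-3)
    block.eval()
    x = torch.randn(1, 16, 64, 64)
    y = block(x)
    assert y.size() == x.size()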
10,912
31.002933
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/enet.py
""" ENet for image segmentation, implemented in PyTorch. Original paper: 'ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1606.02147. """ __all__ = ['ENet', 'enet_cityscapes', 'ENetMixDownBlock'] import os import torch import torch.nn as nn import torch.nn.functional as F from .common import conv3x3, ConvBlock, AsymConvBlock, DeconvBlock, NormActivation, conv1x1_block class ENetMaxDownBlock(nn.Module): """ ENet specific max-pooling downscale block. Parameters: ---------- ext_channels : int Number of extra channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int, or tuple/list of 2 int, or tuple/list of 4 int Padding value for convolution layer. """ def __init__(self, ext_channels, kernel_size, padding): super(ENetMaxDownBlock, self).__init__() self.ext_channels = ext_channels self.pool = nn.MaxPool2d( kernel_size=kernel_size, stride=2, padding=padding, return_indices=True) def forward(self, x): x, max_indices = self.pool(x) branch, _, height, width = x.size() pad = torch.zeros(branch, self.ext_channels, height, width, dtype=x.dtype, device=x.device) x = torch.cat((x, pad), dim=1) return x, max_indices class ENetUpBlock(nn.Module): """ ENet upscale block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, bias): super(ENetUpBlock, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, activation=None) self.unpool = nn.MaxUnpool2d(kernel_size=2) def forward(self, x, max_indices): x = self.conv(x) x = self.unpool(x, max_indices) return x class ENetUnit(nn.Module): """ ENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int, or tuple/list of 2 int, or tuple/list of 4 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_asym_convs : bool Whether to use asymmetric convolution blocks. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bias : bool Whether the layer uses a bias vector. activation : function or str or None Activation function or name of activation function. downs : bool Whether to downscale or upscale. bottleneck_factor : int, default 4 Bottleneck factor. 
""" def __init__(self, in_channels, out_channels, kernel_size, padding, dilation, use_asym_conv, dropout_rate, bias, activation, down, bottleneck_factor=4): super(ENetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) self.down = down mid_channels = in_channels // bottleneck_factor if not self.resize_identity: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, activation=activation) if use_asym_conv: self.conv2 = AsymConvBlock( channels=mid_channels, kernel_size=kernel_size, padding=padding, dilation=dilation, bias=bias, lw_activation=activation, rw_activation=activation) else: self.conv2 = ConvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=bias, activation=activation) elif self.down: self.identity_block = ENetMaxDownBlock( ext_channels=(out_channels - in_channels), kernel_size=kernel_size, padding=padding) self.conv1 = ConvBlock( in_channels=in_channels, out_channels=mid_channels, kernel_size=2, stride=2, padding=0, dilation=1, bias=bias, activation=activation) self.conv2 = ConvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=bias, activation=activation) else: self.identity_block = ENetUpBlock( in_channels=in_channels, out_channels=out_channels, bias=bias) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias, activation=activation) self.conv2 = DeconvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=2, padding=padding, out_padding=1, dilation=dilation, bias=bias, activation=activation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bias=bias, activation=activation) self.dropout = nn.Dropout2d(p=dropout_rate) self.activ = activation() def forward(self, x, max_indices=None): if not self.resize_identity: identity = x elif self.down: identity, max_indices = self.identity_block(x) else: identity = self.identity_block(x, max_indices) x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.dropout(x) x = x + identity x = self.activ(x) if self.resize_identity and self.down: return x, max_indices else: return x class ENetStage(nn.Module): """ ENet stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_sizes : list of int Kernel sizes. paddings : list of int Padding values. dilations : list of int Dilation values. use_asym_convs : list of int Whether to use asymmetric convolution blocks. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bias : bool Whether the layer uses a bias vector. activation : function or str or None Activation function or name of activation function. downs : bool Whether to downscale or upscale. 
""" def __init__(self, in_channels, out_channels, kernel_sizes, paddings, dilations, use_asym_convs, dropout_rate, bias, activation, down): super(ENetStage, self).__init__() self.down = down units = nn.Sequential() for i, kernel_size in enumerate(kernel_sizes): unit = ENetUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=paddings[i], dilation=dilations[i], use_asym_conv=(use_asym_convs[i] == 1), dropout_rate=dropout_rate, bias=bias, activation=activation, down=down) if i == 0: self.scale_unit = unit else: units.add_module("unit{}".format(i + 1), unit) in_channels = out_channels self.units = units def forward(self, x, max_indices=None): if self.down: x, max_indices = self.scale_unit(x) else: x = self.scale_unit(x, max_indices) x = self.units(x) if self.down: return x, max_indices else: return x class ENetMixDownBlock(nn.Module): """ ENet specific mixed downscale block, used as an initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. correct_size_mistmatch : bool, default False Whether to correct downscaled sizes of images. """ def __init__(self, in_channels, out_channels, bias=False, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), correct_size_mismatch=False): super(ENetMixDownBlock, self).__init__() self.correct_size_mismatch = correct_size_mismatch self.pool = nn.MaxPool2d( kernel_size=2, stride=2) self.conv = conv3x3( in_channels=in_channels, out_channels=(out_channels - in_channels), stride=2, bias=bias) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps, activation=activation) def forward(self, x): y1 = self.pool(x) y2 = self.conv(x) if self.correct_size_mismatch: diff_h = y2.size()[2] - y1.size()[2] diff_w = y2.size()[3] - y1.size()[3] y1 = F.pad(y1, pad=(diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2)) x = torch.cat((y2, y1), dim=1) x = self.norm_activ(x) return x class ENet(nn.Module): """ ENet model from 'ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1606.02147. Parameters: ---------- channels : list of int Number of output channels for the first unit of each stage. init_block_channels : int Number of output channels for the initial unit. kernel_sizes : list of list of int Kernel sizes for each unit. paddings : list of list of int Padding values for each unit. dilations : list of list of int Dilation values for each unit. use_asym_convs : list of list of int Whether to use asymmetric convolution blocks for each unit. dropout_rates : list of float Parameter of dropout layer for each stage. downs : list of int Whether to downscale or upscale in each stage. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, init_block_channels, kernel_sizes, paddings, dilations, use_asym_convs, dropout_rates, downs, correct_size_mismatch=False, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ENet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = False encoder_activation = (lambda: nn.PReLU(1)) decoder_activation = (lambda: nn.ReLU(inplace=True)) self.stem = ENetMixDownBlock( in_channels=in_channels, out_channels=init_block_channels, bias=bias, bn_eps=bn_eps, activation=encoder_activation, correct_size_mismatch=correct_size_mismatch) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): setattr(self, "stage{}".format(i + 1), ENetStage( in_channels=in_channels, out_channels=channels_per_stage, kernel_sizes=kernel_sizes[i], paddings=paddings[i], dilations=dilations[i], use_asym_convs=use_asym_convs[i], dropout_rate=dropout_rates[i], bias=bias, activation=(encoder_activation if downs[i] == 1 else decoder_activation), down=(downs[i] == 1))) in_channels = channels_per_stage self.head = nn.ConvTranspose2d( in_channels=in_channels, out_channels=num_classes, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.stem(x) x, max_indices1 = self.stage1(x) x, max_indices2 = self.stage2(x) x = self.stage3(x, max_indices2) x = self.stage4(x, max_indices1) x = self.head(x) return x def get_enet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ENet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [64, 128, 64, 16] kernel_sizes = [[3, 3, 3, 3, 3], [3, 3, 3, 5, 3, 3, 3, 5, 3, 3, 3, 5, 3, 3, 3, 5, 3], [3, 3, 3], [3, 3]] paddings = [[1, 1, 1, 1, 1], [1, 1, 2, 2, 4, 1, 8, 2, 16, 1, 2, 2, 4, 1, 8, 2, 16], [1, 1, 1], [1, 1]] dilations = [[1, 1, 1, 1, 1], [1, 1, 2, 1, 4, 1, 8, 1, 16, 1, 2, 1, 4, 1, 8, 1, 16], [1, 1, 1], [1, 1]] use_asym_convs = [[0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0], [0, 0]] dropout_rates = [0.01, 0.1, 0.1, 0.1] downs = [1, 1, 0, 0] init_block_channels = 16 net = ENet( channels=channels, init_block_channels=init_block_channels, kernel_sizes=kernel_sizes, paddings=paddings, dilations=dilations, use_asym_convs=use_asym_convs, dropout_rates=dropout_rates, downs=downs, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def enet_cityscapes(num_classes=19, **kwargs): """ ENet model for Cityscapes from 'ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1606.02147. 
Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_enet(num_classes=num_classes, model_name="enet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ enet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != enet_cityscapes or weight_count == 358060) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
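# --- Editor's illustrative sketch (not part of the original file) ---
# ENet's downscale units pool with `return_indices=True` and zero-pad the
# extra channels, while the matching upscale units reuse those indices in
# MaxUnpool2d. A minimal round trip through the two helper blocks above;
# the channel counts 16 -> 64 -> 16 are arbitrary assumptions:
def _down_up_sketch():
    down = ENetMaxDownBlock(ext_channels=48, kernel_size=3, padding=1)
    up = ENetUpBlock(in_channels=64, out_channels=16, bias=False)
    up.eval()
    x = torch.randn(1, 16, 64, 64)
    y, idx = down(x)   # (1, 64, 32, 32): 16 pooled channels + 48 zero-padded ones
    z = up(y, idx)     # (1, 16, 64, 64): unpooled at the remembered max locations
    assert z.size() == x.size()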
18,480
31.14087
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/darknet.py
""" DarkNet for ImageNet-1K, implemented in PyTorch. Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. """ __all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block def dark_convYxY(in_channels, out_channels, alpha, pointwise): """ DarkNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. alpha : float Slope coefficient for Leaky ReLU activation. pointwise : bool Whether use 1x1 (pointwise) convolution or 3x3 convolution. """ if pointwise: return conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True)) else: return conv3x3_block( in_channels=in_channels, out_channels=out_channels, activation=nn.LeakyReLU( negative_slope=alpha, inplace=True)) class DarkNet(nn.Module): """ DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- channels : list of list of int Number of output channels for each unit. odd_pointwise : bool Whether pointwise convolution layer is used for each odd unit. avg_pool_size : int Window size of the final average pooling. cls_activ : bool Whether classification convolution layer uses an activation. alpha : float, default 0.1 Slope coefficient for Leaky ReLU activation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, odd_pointwise, avg_pool_size, cls_activ, alpha=0.1, in_channels=3, in_size=(224, 224), num_classes=1000): super(DarkNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), dark_convYxY( in_channels=in_channels, out_channels=out_channels, alpha=alpha, pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise))) in_channels = out_channels if i != len(channels) - 1: stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=2, stride=2)) self.features.add_module("stage{}".format(i + 1), stage) self.output = nn.Sequential() self.output.add_module("final_conv", nn.Conv2d( in_channels=in_channels, out_channels=num_classes, kernel_size=1)) if cls_activ: self.output.add_module("final_activ", nn.LeakyReLU( negative_slope=alpha, inplace=True)) self.output.add_module("final_pool", nn.AvgPool2d( kernel_size=avg_pool_size, stride=1)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): if "final_conv" in name: init.normal_(module.weight, mean=0.0, std=0.01) else: init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_darknet(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DarkNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('ref', 'tiny' or '19'). model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == 'ref': channels = [[16], [32], [64], [128], [256], [512], [1024]] odd_pointwise = False avg_pool_size = 3 cls_activ = True elif version == 'tiny': channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]] odd_pointwise = True avg_pool_size = 14 cls_activ = False elif version == '19': channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512], [1024, 512, 1024, 512, 1024]] odd_pointwise = False avg_pool_size = 7 cls_activ = False else: raise ValueError("Unsupported DarkNet version {}".format(version)) net = DarkNet( channels=channels, odd_pointwise=odd_pointwise, avg_pool_size=avg_pool_size, cls_activ=cls_activ, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def darknet_ref(**kwargs): """ DarkNet 'Reference' model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_darknet(version="ref", model_name="darknet_ref", **kwargs) def darknet_tiny(**kwargs): """ DarkNet Tiny model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_darknet(version="tiny", model_name="darknet_tiny", **kwargs) def darknet19(**kwargs): """ DarkNet-19 model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_darknet(version="19", model_name="darknet19", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False models = [ darknet_ref, darknet_tiny, darknet19, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != darknet_ref or weight_count == 7319416) assert (model != darknet_tiny or weight_count == 1042104) assert (model != darknet19 or weight_count == 20842376) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
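# --- Editor's illustrative sketch (not part of the original file) ---
# A worked example of the unit-type selection in `DarkNet.__init__` above:
# within a multi-unit stage the expression
#     (len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise)
# makes the odd units (1st, 3rd, ...) pointwise when `odd_pointwise` is True,
# and the even units pointwise when it is False. For darknet_tiny's stage
# [16, 128, 16, 128] the pattern therefore alternates 1x1 / 3x3:
def _pointwise_pattern_sketch():
    channels_per_stage = [16, 128, 16, 128]
    odd_pointwise = True
    pattern = [(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise)
               for j in range(len(channels_per_stage))]
    assert pattern == [True, False, True, False]  # 1x1, 3x3, 1x1, 3x3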
8,529
30.360294
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ror_cifar.py
""" RoR-3 for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. """ __all__ = ['CIFARRoR', 'ror3_56_cifar10', 'ror3_56_cifar100', 'ror3_56_svhn', 'ror3_110_cifar10', 'ror3_110_cifar100', 'ror3_110_svhn', 'ror3_164_cifar10', 'ror3_164_cifar100', 'ror3_164_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block class RoRBlock(nn.Module): """ RoR-3 block for residual path in residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(RoRBlock, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, activation=None) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): x = self.conv1(x) if self.use_dropout: x = self.dropout(x) x = self.conv2(x) return x class RoRResUnit(nn.Module): """ RoR-3 residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. last_activate : bool, default True Whether activate output. """ def __init__(self, in_channels, out_channels, dropout_rate, last_activate=True): super(RoRResUnit, self).__init__() self.last_activate = last_activate self.resize_identity = (in_channels != out_channels) self.body = RoRBlock( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity if self.last_activate: x = self.activ(x) return x class RoRResStage(nn.Module): """ RoR-3 residual stage. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of output channels for each unit. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. downsample : bool, default True Whether downsample output. """ def __init__(self, in_channels, out_channels_list, dropout_rate, downsample=True): super(RoRResStage, self).__init__() self.downsample = downsample self.shortcut = conv1x1_block( in_channels=in_channels, out_channels=out_channels_list[-1], activation=None) self.units = nn.Sequential() for i, out_channels in enumerate(out_channels_list): last_activate = (i != len(out_channels_list) - 1) self.units.add_module("unit{}".format(i + 1), RoRResUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, last_activate=last_activate)) in_channels = out_channels if self.downsample: self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=2, stride=2, padding=0) def forward(self, x): identity = self.shortcut(x) x = self.units(x) x = x + identity if self.downsample: x = self.activ(x) x = self.pool(x) return x class RoRResBody(nn.Module): """ RoR-3 residual body (main feature path). Parameters: ---------- in_channels : int Number of input channels. 
out_channels_lists : list of list of int Number of output channels for each stage. dropout_rate : float Parameter of Dropout layer. Fraction of the input units to drop. """ def __init__(self, in_channels, out_channels_lists, dropout_rate): super(RoRResBody, self).__init__() self.shortcut = conv1x1_block( in_channels=in_channels, out_channels=out_channels_lists[-1][-1], stride=4, activation=None) self.stages = nn.Sequential() for i, channels_per_stage in enumerate(out_channels_lists): downsample = (i != len(out_channels_lists) - 1) self.stages.add_module("stage{}".format(i + 1), RoRResStage( in_channels=in_channels, out_channels_list=channels_per_stage, dropout_rate=dropout_rate, downsample=downsample)) in_channels = channels_per_stage[-1] self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = self.shortcut(x) x = self.stages(x) x = x + identity x = self.activ(x) return x class CIFARRoR(nn.Module): """ RoR-3 model for CIFAR from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Fraction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, dropout_rate=0.0, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARRoR, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels self.features.add_module("body", RoRResBody( in_channels=in_channels, out_channels_lists=channels, dropout_rate=dropout_rate)) in_channels = channels[-1][-1] self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_ror_cifar(num_classes, blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create RoR-3 model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) assert ((blocks - 8) % 6 == 0) layers = [(blocks - 8) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARRoR( channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ror3_56_cifar10(num_classes=10, **kwargs): """ RoR-3-56 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=56, model_name="ror3_56_cifar10", **kwargs) def ror3_56_cifar100(num_classes=100, **kwargs): """ RoR-3-56 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=56, model_name="ror3_56_cifar100", **kwargs) def ror3_56_svhn(num_classes=10, **kwargs): """ RoR-3-56 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=56, model_name="ror3_56_svhn", **kwargs) def ror3_110_cifar10(num_classes=10, **kwargs): """ RoR-3-110 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=110, model_name="ror3_110_cifar10", **kwargs) def ror3_110_cifar100(num_classes=100, **kwargs): """ RoR-3-110 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=110, model_name="ror3_110_cifar100", **kwargs) def ror3_110_svhn(num_classes=10, **kwargs): """ RoR-3-110 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. 
Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=110, model_name="ror3_110_svhn", **kwargs) def ror3_164_cifar10(num_classes=10, **kwargs): """ RoR-3-164 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=164, model_name="ror3_164_cifar10", **kwargs) def ror3_164_cifar100(num_classes=100, **kwargs): """ RoR-3-164 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=164, model_name="ror3_164_cifar100", **kwargs) def ror3_164_svhn(num_classes=10, **kwargs): """ RoR-3-164 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,' https://arxiv.org/abs/1608.02908. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ror_cifar(num_classes=num_classes, blocks=164, model_name="ror3_164_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (ror3_56_cifar10, 10), (ror3_56_cifar100, 100), (ror3_56_svhn, 10), (ror3_110_cifar10, 10), (ror3_110_cifar100, 100), (ror3_110_svhn, 10), (ror3_164_cifar10, 10), (ror3_164_cifar100, 100), (ror3_164_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ror3_56_cifar10 or weight_count == 762746) assert (model != ror3_56_cifar100 or weight_count == 768596) assert (model != ror3_56_svhn or weight_count == 762746) assert (model != ror3_110_cifar10 or weight_count == 1637690) assert (model != ror3_110_cifar100 or weight_count == 1643540) assert (model != ror3_110_svhn or weight_count == 1637690) assert (model != ror3_164_cifar10 or weight_count == 2512634) assert (model != ror3_164_cifar100 or weight_count == 2518484) assert (model != ror3_164_svhn or weight_count == 2512634) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
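# --- Editor's illustrative sketch (not part of the original file) ---
# RoR-3 adds shortcuts at three levels: inside each unit, across each stage,
# and across the whole body, where the root shortcut uses a stride-4 1x1
# convolution to match the two 2x max-pooling downsamplings of the stages.
# A minimal shape check on CIFAR-sized input; the 3-unit-per-stage
# configuration is an arbitrary assumption:
def _body_shortcut_sketch():
    import torch
    body = RoRResBody(
        in_channels=16,
        out_channels_lists=[[16] * 3, [32] * 3, [64] * 3],
        dropout_rate=0.0)
    body.eval()
    x = torch.randn(1, 16, 32, 32)
    y = body(x)
    assert tuple(y.size()) == (1, 64, 8, 8)  # 32 -> 16 -> 8; stride-4 shortcut matches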
16,718
31.401163
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/contextnet.py
""" ContextNet for image segmentation, implemented in PyTorch. Original paper: 'ContextNet: Exploring Context and Detail for Semantic Segmentation in Real-time,' https://arxiv.org/abs/1805.04554. """ __all__ = ['ContextNet', 'ctxnet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwsconv3x3_block, InterpolationBlock class CtxShallowNet(nn.Module): """ ContextNet specific shallow net (spatial detail encoder). Parameters: ---------- in_channels : int Number of input channels. mid1_channels : int Number of middle #1 channels. mid2_channels : int Number of middle #2 channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid1_channels, mid2_channels, out_channels): super(CtxShallowNet, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid1_channels, stride=2, padding=0) self.conv2 = dwsconv3x3_block( in_channels=mid1_channels, out_channels=mid2_channels, stride=2) self.conv3 = dwsconv3x3_block( in_channels=mid2_channels, out_channels=out_channels, stride=2) self.conv4 = dwsconv3x3_block( in_channels=out_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) return x class LinearBottleneck(nn.Module): """ So-called 'Linear Bottleneck' layer (from MobileNetV2). It is used as a CtxDeepNet encoder unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. expansion : bool Whether do expansion of channels. """ def __init__(self, in_channels, out_channels, stride, expansion): super(LinearBottleneck, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) mid_channels = in_channels * 6 if expansion else in_channels self.block = nn.Sequential( conv1x1_block( in_channels=in_channels, out_channels=mid_channels), dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride), conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None), ) def forward(self, x): if self.residual: identity = x x = self.block(x) if self.residual: x = x + identity return x class CtxDeepNet(nn.Module): """ ContextNet specific deep net (regular encoder). Parameters: ---------- in_channels : int Number of input channels. init_block_channels : int Number of channels for init block. """ def __init__(self, in_channels, init_block_channels): super(CtxDeepNet, self).__init__() layers = [1, 1, 3, 3, 2, 2] channels_per_layers = [32, 32, 48, 64, 96, 128] downsample = [0, 0, 1, 1, 0, 0] self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, padding=0)) in_channels = init_block_channels for i, out_channels in enumerate(channels_per_layers): stage = nn.Sequential() expansion = (i != 0) for j in range(layers[i]): stride = 2 if (j == 0) and (downsample[i] == 1) else 1 stage.add_module("unit{}".format(j + 1), LinearBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) def forward(self, x): x = self.features(x) return x class FeatureFusion(nn.Module): """ ContextNet specific feature fusion block. Parameters: ---------- in_channels_high : int Number of input channels for x_high. 
in_channels_low : int Number of input channels for x_low. out_channels : int Number of output channels. """ def __init__(self, in_channels_high, in_channels_low, out_channels): super(FeatureFusion, self).__init__() self.conv_high = conv1x1_block( in_channels=in_channels_high, out_channels=out_channels, bias=True, activation=None) self.up = InterpolationBlock( scale_factor=4, align_corners=True) self.dw_conv_low = dwconv3x3_block( in_channels=in_channels_low, out_channels=out_channels) self.pw_conv_low = conv1x1_block( in_channels=out_channels, out_channels=out_channels, bias=True, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x_high, x_low): x_high = self.conv_high(x_high) x_low = self.up(x_low) x_low = self.dw_conv_low(x_low) x_low = self.pw_conv_low(x_low) out = x_high + x_low out = self.activ(out) return out class CtxHead(nn.Module): """ ContextNet specific head/classifier block. Parameters: ---------- in_channels : int Number of input channels. num_classes : int Number of output channels/classes. """ def __init__(self, in_channels, num_classes): super(CtxHead, self).__init__() self.conv1 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels) self.conv2 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv3 = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.dropout(x) x = self.conv3(x) return x class CtxAuxHead(nn.Module): """ ContextNet specific auxiliary head/classifier block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. num_classes : int Number of output channels/classes. """ def __init__(self, in_channels, mid_channels, num_classes): super(CtxAuxHead, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=num_classes, bias=True) def forward(self, x): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) return x class ContextNet(nn.Module): """ ContextNet model from 'ContextNet: Exploring Context and Detail for Semantic Segmentation in Real-time,' https://arxiv.org/abs/1805.04554. Parameters: ---------- aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ContextNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.features_high = CtxShallowNet( in_channels=in_channels, mid1_channels=32, mid2_channels=64, out_channels=128) self.down = InterpolationBlock( scale_factor=4, align_corners=True, up=False) self.features_low = CtxDeepNet( in_channels=in_channels, init_block_channels=32) self.fusion = FeatureFusion( in_channels_high=128, in_channels_low=128, out_channels=128) self.head = CtxHead( in_channels=128, num_classes=num_classes) self.up = InterpolationBlock( scale_factor=8, align_corners=True) if self.aux: self.aux_head = CtxAuxHead( in_channels=128, mid_channels=32, num_classes=num_classes) def forward(self, x): x_high = self.features_high(x) x_low = self.down(x) x_low = self.features_low(x_low) x = self.fusion(x_high, x_low) x = self.head(x) x = self.up(x) if self.aux: y = self.aux_head(x_high) y = self.up(y) return x, y else: return x def get_ctxnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ContextNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = ContextNet( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ctxnet_cityscapes(num_classes=19, **kwargs): """ ContextNet model for Cityscapes from 'ContextNet: Exploring Context and Detail for Semantic Segmentation in Real-time,' https://arxiv.org/abs/1805.04554. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ctxnet(num_classes=num_classes, model_name="ctxnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False aux = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ ctxnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != ctxnet_cityscapes or weight_count == 914118) else: assert (model != ctxnet_cityscapes or weight_count == 876563) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if aux: assert (tuple(ys[1].size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
12,923
28.239819
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/dicenet.py
""" DiCENet for ImageNet-1K, implemented in PyTorch. Original paper: 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. """ __all__ = ['DiceNet', 'dicenet_wd5', 'dicenet_wd2', 'dicenet_w3d4', 'dicenet_w1', 'dicenet_w5d4', 'dicenet_w3d2', 'dicenet_w7d8', 'dicenet_w2'] import os import math import torch from torch.nn import init from torch import nn import torch.nn.functional as F from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, NormActivation, ChannelShuffle, Concurrent class SpatialDiceBranch(nn.Module): """ Spatial element of DiCE block for selected dimension. Parameters: ---------- sp_size : int Desired size for selected spatial dimension. is_height : bool Is selected dimension height. """ def __init__(self, sp_size, is_height): super(SpatialDiceBranch, self).__init__() self.is_height = is_height self.index = 2 if is_height else 3 self.base_sp_size = sp_size self.conv = conv3x3( in_channels=self.base_sp_size, out_channels=self.base_sp_size, groups=self.base_sp_size) def forward(self, x): height, width = x.size()[2:] if self.is_height: real_sp_size = height real_in_size = (real_sp_size, width) base_in_size = (self.base_sp_size, width) else: real_sp_size = width real_in_size = (height, real_sp_size) base_in_size = (height, self.base_sp_size) if real_sp_size != self.base_sp_size: if real_sp_size < self.base_sp_size: x = F.interpolate(x, size=base_in_size, mode="bilinear", align_corners=True) else: x = F.adaptive_avg_pool2d(x, output_size=base_in_size) x = x.transpose(1, self.index).contiguous() x = self.conv(x) x = x.transpose(1, self.index).contiguous() changed_sp_size = x.size(self.index) if real_sp_size != changed_sp_size: if changed_sp_size < real_sp_size: x = F.interpolate(x, size=real_in_size, mode="bilinear", align_corners=True) else: x = F.adaptive_avg_pool2d(x, output_size=real_in_size) return x class DiceBaseBlock(nn.Module): """ Base part of DiCE block (without attention). Parameters: ---------- channels : int Number of input/output channels. in_size : tuple of two ints Spatial size of the expected input image. """ def __init__(self, channels, in_size): super(DiceBaseBlock, self).__init__() mid_channels = 3 * channels self.convs = Concurrent() self.convs.add_module("ch_conv", conv3x3( in_channels=channels, out_channels=channels, groups=channels)) self.convs.add_module("h_conv", SpatialDiceBranch( sp_size=in_size[0], is_height=True)) self.convs.add_module("w_conv", SpatialDiceBranch( sp_size=in_size[1], is_height=False)) self.norm_activ = NormActivation( in_channels=mid_channels, activation=(lambda: nn.PReLU(num_parameters=mid_channels))) self.shuffle = ChannelShuffle( channels=mid_channels, groups=3) self.squeeze_conv = conv1x1_block( in_channels=mid_channels, out_channels=channels, groups=channels, activation=(lambda: nn.PReLU(num_parameters=channels))) def forward(self, x): x = self.convs(x) x = self.norm_activ(x) x = self.shuffle(x) x = self.squeeze_conv(x) return x class DiceAttBlock(nn.Module): """ Pure attention part of DiCE block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. reduction : int, default 4 Squeeze reduction value. 
""" def __init__(self, in_channels, out_channels, reduction=4): super(DiceAttBlock, self).__init__() mid_channels = in_channels // reduction self.pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, bias=False) self.activ = nn.ReLU(inplace=True) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): w = self.pool(x) w = self.conv1(w) w = self.activ(w) w = self.conv2(w) w = self.sigmoid(w) return w class DiceBlock(nn.Module): """ DiCE block (volume-wise separable convolutions). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. """ def __init__(self, in_channels, out_channels, in_size): super(DiceBlock, self).__init__() proj_groups = math.gcd(in_channels, out_channels) self.base_block = DiceBaseBlock( channels=in_channels, in_size=in_size) self.att = DiceAttBlock( in_channels=in_channels, out_channels=out_channels) self.proj_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, groups=proj_groups, activation=(lambda: nn.PReLU(num_parameters=out_channels))) def forward(self, x): x = self.base_block(x) w = self.att(x) x = self.proj_conv(x) x = x * w return x class StridedDiceLeftBranch(nn.Module): """ Left branch of the strided DiCE block. Parameters: ---------- channels : int Number of input/output channels. """ def __init__(self, channels): super(StridedDiceLeftBranch, self).__init__() self.conv1 = conv3x3_block( in_channels=channels, out_channels=channels, stride=2, groups=channels, activation=(lambda: nn.PReLU(num_parameters=channels))) self.conv2 = conv1x1_block( in_channels=channels, out_channels=channels, activation=(lambda: nn.PReLU(num_parameters=channels))) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class StridedDiceRightBranch(nn.Module): """ Right branch of the strided DiCE block. Parameters: ---------- channels : int Number of input/output channels. in_size : tuple of two ints Spatial size of the expected input image. """ def __init__(self, channels, in_size): super(StridedDiceRightBranch, self).__init__() self.pool = nn.AvgPool2d( kernel_size=3, padding=1, stride=2) self.dice = DiceBlock( in_channels=channels, out_channels=channels, in_size=(in_size[0] // 2, in_size[1] // 2)) self.conv = conv1x1_block( in_channels=channels, out_channels=channels, activation=(lambda: nn.PReLU(num_parameters=channels))) def forward(self, x): x = self.pool(x) x = self.dice(x) x = self.conv(x) return x class StridedDiceBlock(nn.Module): """ Strided DiCE block (strided volume-wise separable convolutions). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. """ def __init__(self, in_channels, out_channels, in_size): super(StridedDiceBlock, self).__init__() assert (out_channels == 2 * in_channels) self.branches = Concurrent() self.branches.add_module("left_branch", StridedDiceLeftBranch(channels=in_channels)) self.branches.add_module("right_branch", StridedDiceRightBranch( channels=in_channels, in_size=in_size)) self.shuffle = ChannelShuffle( channels=out_channels, groups=2) def forward(self, x): x = self.branches(x) x = self.shuffle(x) return x class ShuffledDiceRightBranch(nn.Module): """ Right branch of the shuffled DiCE block. 
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size):
        super(ShuffledDiceRightBranch, self).__init__()
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=(lambda: nn.PReLU(num_parameters=out_channels)))
        self.dice = DiceBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            in_size=in_size)

    def forward(self, x):
        x = self.conv(x)
        x = self.dice(x)
        return x


class ShuffledDiceBlock(nn.Module):
    """
    Shuffled DiCE block (shuffled volume-wise separable convolutions).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size):
        super(ShuffledDiceBlock, self).__init__()
        self.left_part = in_channels - in_channels // 2
        right_in_channels = in_channels - self.left_part
        right_out_channels = out_channels - self.left_part
        self.right_branch = ShuffledDiceRightBranch(
            in_channels=right_in_channels,
            out_channels=right_out_channels,
            in_size=in_size)
        self.shuffle = ChannelShuffle(
            channels=(2 * right_out_channels),
            groups=2)

    def forward(self, x):
        x1, x2 = torch.chunk(x, chunks=2, dim=1)
        x2 = self.right_branch(x2)
        x = torch.cat((x1, x2), dim=1)
        x = self.shuffle(x)
        return x


class DiceInitBlock(nn.Module):
    """
    DiceNet specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(DiceInitBlock, self).__init__()
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            activation=(lambda: nn.PReLU(num_parameters=out_channels)))
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=1)

    def forward(self, x):
        x = self.conv(x)
        x = self.pool(x)
        return x


class DiceClassifier(nn.Module):
    """
    DiceNet specific classifier block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    num_classes : int
        Number of classification classes.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 num_classes,
                 dropout_rate):
        super(DiceClassifier, self).__init__()
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=4)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=num_classes,
            bias=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.dropout(x)
        x = self.conv2(x)
        return x


class DiceNet(nn.Module):
    """
    DiCENet model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
    https://arxiv.org/abs/1906.03516.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    dropout_rate : float
        Parameter of Dropout layer in classifier. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
""" def __init__(self, channels, init_block_channels, classifier_mid_channels, dropout_rate, in_channels=3, in_size=(224, 224), num_classes=1000): super(DiceNet, self).__init__() assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0)) self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", DiceInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels in_size = (in_size[0] // 4, in_size[1] // 4) for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): unit_class = StridedDiceBlock if j == 0 else ShuffledDiceBlock stage.add_module("unit{}".format(j + 1), unit_class( in_channels=in_channels, out_channels=out_channels, in_size=in_size)) in_channels = out_channels in_size = (in_size[0] // 2, in_size[1] // 2) if j == 0 else in_size self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = DiceClassifier( in_channels=in_channels, mid_channels=classifier_mid_channels, num_classes=num_classes, dropout_rate=dropout_rate) self.init_params() def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_dicenet(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DiCENet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" channels_per_layers_dict = { 0.2: [32, 64, 128], 0.5: [48, 96, 192], 0.75: [86, 172, 344], 1.0: [116, 232, 464], 1.25: [144, 288, 576], 1.5: [176, 352, 704], 1.75: [210, 420, 840], 2.0: [244, 488, 976], 2.4: [278, 556, 1112], } if width_scale not in channels_per_layers_dict.keys(): raise ValueError("Unsupported DiceNet with width scale: {}".format(width_scale)) channels_per_layers = channels_per_layers_dict[width_scale] layers = [3, 7, 3] if width_scale > 0.2: init_block_channels = 24 else: init_block_channels = 16 channels = [[ci] * li for i, (ci, li) in enumerate(zip(channels_per_layers, layers))] for i in range(len(channels)): pred_channels = channels[i - 1][-1] if i != 0 else init_block_channels channels[i] = [pred_channels * 2] + channels[i] if width_scale > 2.0: classifier_mid_channels = 1280 else: classifier_mid_channels = 1024 if width_scale > 1.0: dropout_rate = 0.2 else: dropout_rate = 0.1 net = DiceNet( channels=channels, init_block_channels=init_block_channels, classifier_mid_channels=classifier_mid_channels, dropout_rate=dropout_rate, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def dicenet_wd5(**kwargs): """ DiCENet x0.2 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.2, model_name="dicenet_wd5", **kwargs) def dicenet_wd2(**kwargs): """ DiCENet x0.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.5, model_name="dicenet_wd2", **kwargs) def dicenet_w3d4(**kwargs): """ DiCENet x0.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.75, model_name="dicenet_w3d4", **kwargs) def dicenet_w1(**kwargs): """ DiCENet x1.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.0, model_name="dicenet_w1", **kwargs) def dicenet_w5d4(**kwargs): """ DiCENet x1.25 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_dicenet(width_scale=1.25, model_name="dicenet_w5d4", **kwargs) def dicenet_w3d2(**kwargs): """ DiCENet x1.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.5, model_name="dicenet_w3d2", **kwargs) def dicenet_w7d8(**kwargs): """ DiCENet x1.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.75, model_name="dicenet_w7d8", **kwargs) def dicenet_w2(**kwargs): """ DiCENet x2.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=2.0, model_name="dicenet_w2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False models = [ dicenet_wd5, dicenet_wd2, dicenet_w3d4, dicenet_w1, dicenet_w5d4, dicenet_w3d2, dicenet_w7d8, dicenet_w2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dicenet_wd5 or weight_count == 1130704) assert (model != dicenet_wd2 or weight_count == 1214120) assert (model != dicenet_w3d4 or weight_count == 1495676) assert (model != dicenet_w1 or weight_count == 1805604) assert (model != dicenet_w5d4 or weight_count == 2162888) assert (model != dicenet_w3d2 or weight_count == 2652200) assert (model != dicenet_w7d8 or weight_count == 3264932) assert (model != dicenet_w2 or weight_count == 3979044) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
23,378
29.441406
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/nvpattexp.py
""" Neural Voice Puppetry Audio-to-Expression net for speech-driven facial animation, implemented in PyTorch. Original paper: 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. """ __all__ = ['NvpAttExp', 'nvpattexp116bazel76'] import os import torch import torch.nn as nn from .common import DenseBlock, ConvBlock, ConvBlock1d, SelectableDense class NvpAttExpEncoder(nn.Module): """ Neural Voice Puppetry Audio-to-Expression encoder. Parameters: ---------- audio_features : int Number of audio features (characters/sounds). audio_window_size : int Size of audio window (for time related audio features). seq_len : int, default Size of feature window. encoder_features : int Number of encoder features. """ def __init__(self, audio_features, audio_window_size, seq_len, encoder_features): super(NvpAttExpEncoder, self).__init__() self.audio_features = audio_features self.audio_window_size = audio_window_size self.seq_len = seq_len conv_channels = (32, 32, 64, 64) conv_slopes = (0.02, 0.02, 0.2, 0.2) fc_channels = (128, 64, encoder_features) fc_slopes = (0.02, 0.02, None) att_conv_channels = (16, 8, 4, 2, 1) att_conv_slopes = 0.02 in_channels = audio_features self.conv_branch = nn.Sequential() for i, (out_channels, slope) in enumerate(zip(conv_channels, conv_slopes)): self.conv_branch.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0), bias=True, use_bn=False, activation=(lambda: nn.LeakyReLU(negative_slope=slope, inplace=True)))) in_channels = out_channels self.fc_branch = nn.Sequential() for i, (out_channels, slope) in enumerate(zip(fc_channels, fc_slopes)): activation = (lambda: nn.LeakyReLU(negative_slope=slope, inplace=True)) if slope is not None else\ (lambda: nn.Tanh()) self.fc_branch.add_module("fc{}".format(i + 1), DenseBlock( in_features=in_channels, out_features=out_channels, bias=True, use_bn=False, activation=activation)) in_channels = out_channels self.att_conv_branch = nn.Sequential() for i, out_channels, in enumerate(att_conv_channels): self.att_conv_branch.add_module("att_conv{}".format(i + 1), ConvBlock1d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=True, use_bn=False, activation=(lambda: nn.LeakyReLU(negative_slope=att_conv_slopes, inplace=True)))) in_channels = out_channels self.att_fc = DenseBlock( in_features=seq_len, out_features=seq_len, bias=True, use_bn=False, activation=(lambda: nn.Softmax(dim=1))) def forward(self, x): batch = x.shape[0] batch_seq_len = batch * self.seq_len x = x.view(batch_seq_len, 1, self.audio_window_size, self.audio_features) x = x.transpose(1, 3).contiguous() x = self.conv_branch(x) x = x.view(batch_seq_len, 1, -1) x = self.fc_branch(x) x = x.view(batch, self.seq_len, -1) x = x.transpose(1, 2).contiguous() y = x[:, :, (self.seq_len // 2)] w = self.att_conv_branch(x) w = w.view(batch, self.seq_len) w = self.att_fc(w) w = w.view(batch, self.seq_len, 1) x = torch.bmm(x, w) x = x.squeeze(dim=-1) return x, y class NvpAttExp(nn.Module): """ Neural Voice Puppetry Audio-to-Expression model from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. Parameters: ---------- audio_features : int, default 29 Number of audio features (characters/sounds). audio_window_size : int, default 16 Size of audio window (for time related audio features). seq_len : int, default 8 Size of feature window. 
base_persons : int, default 116 Number of base persons (identities). blendshapes : int, default 76 Number of 3D model blendshapes. encoder_features : int, default 32 Number of encoder features. """ def __init__(self, audio_features=29, audio_window_size=16, seq_len=8, base_persons=116, blendshapes=76, encoder_features=32): super(NvpAttExp, self).__init__() self.base_persons = base_persons self.encoder = NvpAttExpEncoder( audio_features=audio_features, audio_window_size=audio_window_size, seq_len=seq_len, encoder_features=encoder_features) self.decoder = SelectableDense( in_features=encoder_features, out_features=blendshapes, bias=False, num_options=base_persons) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x, pid): x, y = self.encoder(x) x = self.decoder(x, pid) y = self.decoder(y, pid) return x, y def get_nvpattexp(base_persons, blendshapes, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Neural Voice Puppetry Audio-to-Expression model with specific parameters. Parameters: ---------- base_persons : int Number of base persons (subjects). blendshapes : int Number of 3D model blendshapes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = NvpAttExp( base_persons=base_persons, blendshapes=blendshapes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def nvpattexp116bazel76(**kwargs): """ Neural Voice Puppetry Audio-to-Expression model for 116 base persons and Bazel topology with 76 blendshapes from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nvpattexp(base_persons=116, blendshapes=76, model_name="nvpattexp116bazel76", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ nvpattexp116bazel76, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != nvpattexp116bazel76 or weight_count == 327397) batch = 14 seq_len = 8 audio_window_size = 16 audio_features = 29 blendshapes = 76 x = torch.randn(batch, seq_len, audio_window_size, audio_features) pid = torch.full(size=(batch,), fill_value=3, dtype=torch.int64) y1, y2 = net(x, pid) # y1.sum().backward() assert (y1.shape == y2.shape == (batch, blendshapes)) if __name__ == "__main__": _test()
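

# A minimal sketch, not part of the original file, of the temporal attention
# fusion at the end of NvpAttExpEncoder.forward: per-frame feature vectors are
# blended with softmax weights via a batched matrix product, alongside the
# plain center-frame features. Tensor sizes are illustrative.

def _attention_fusion_demo():
    import torch

    batch, features, seq_len = 4, 32, 8
    x = torch.randn(batch, features, seq_len)              # per-frame encoder features
    w = torch.softmax(torch.randn(batch, seq_len), dim=1)  # attention over the window

    fused = torch.bmm(x, w.unsqueeze(-1)).squeeze(-1)      # weighted sum over frames
    center = x[:, :, seq_len // 2]                         # the non-attended 'y' output
    assert fused.shape == center.shape == (batch, features)


if __name__ == "__main__":
    _attention_fusion_demo()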
8,810
31.754647
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/octresnet.py
""" Oct-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049. """ __all__ = ['OctResNet', 'octresnet10_ad2', 'octresnet50b_ad2', 'OctResUnit'] import os from inspect import isfunction import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import DualPathSequential from .resnet import ResInitBlock class OctConv(nn.Conv2d): """ Octave convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. oct_value : int, default 2 Octave value. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding=1, dilation=1, groups=1, bias=False, oct_alpha=0.0, oct_mode="std", oct_value=2): if isinstance(stride, int): stride = (stride, stride) self.downsample = (stride[0] > 1) or (stride[1] > 1) assert (stride[0] in [1, oct_value]) and (stride[1] in [1, oct_value]) stride = (1, 1) if oct_mode == "first": in_alpha = 0.0 out_alpha = oct_alpha elif oct_mode == "norm": in_alpha = oct_alpha out_alpha = oct_alpha elif oct_mode == "last": in_alpha = oct_alpha out_alpha = 0.0 elif oct_mode == "std": in_alpha = 0.0 out_alpha = 0.0 else: raise ValueError("Unsupported octave convolution mode: {}".format(oct_mode)) self.h_in_channels = int(in_channels * (1.0 - in_alpha)) self.h_out_channels = int(out_channels * (1.0 - out_alpha)) self.l_out_channels = out_channels - self.h_out_channels self.oct_alpha = oct_alpha self.oct_mode = oct_mode self.oct_value = oct_value super(OctConv, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.conv_kwargs = { "stride": stride, "padding": padding, "dilation": dilation, "groups": groups} def forward(self, hx, lx=None): if self.oct_mode == "std": return F.conv2d( input=hx, weight=self.weight, bias=self.bias, **self.conv_kwargs), None if self.downsample: hx = F.avg_pool2d( input=hx, kernel_size=(self.oct_value, self.oct_value), stride=(self.oct_value, self.oct_value)) hhy = F.conv2d( input=hx, weight=self.weight[0:self.h_out_channels, 0:self.h_in_channels, :, :], bias=self.bias[0:self.h_out_channels] if self.bias is not None else None, **self.conv_kwargs) if self.oct_mode != "first": hlx = F.conv2d( input=lx, weight=self.weight[0:self.h_out_channels, self.h_in_channels:, :, :], bias=self.bias[0:self.h_out_channels] if self.bias is not None else None, **self.conv_kwargs) if self.oct_mode == "last": hy = hhy + hlx ly = None return hy, ly lhx = F.avg_pool2d( input=hx, kernel_size=(self.oct_value, self.oct_value), stride=(self.oct_value, self.oct_value)) lhy = F.conv2d( input=lhx, weight=self.weight[self.h_out_channels:, 0:self.h_in_channels, :, :], bias=self.bias[self.h_out_channels:] if self.bias is not None else None, **self.conv_kwargs) if 
self.oct_mode == "first": hy = hhy ly = lhy return hy, ly if self.downsample: hly = hlx llx = F.avg_pool2d( input=lx, kernel_size=(self.oct_value, self.oct_value), stride=(self.oct_value, self.oct_value)) else: hly = F.interpolate( input=hlx, scale_factor=self.oct_value, mode="nearest") llx = lx lly = F.conv2d( input=llx, weight=self.weight[self.h_out_channels:, self.h_in_channels:, :, :], bias=self.bias[self.h_out_channels:] if self.bias is not None else None, **self.conv_kwargs) hy = hhy + hly ly = lhy + lly return hy, ly class OctConvBlock(nn.Module): """ Octave convolution block with Batch normalization and ReLU/ReLU6 activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, oct_alpha=0.0, oct_mode="std", bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), activate=True): super(OctConvBlock, self).__init__() self.activate = activate self.last = (oct_mode == "last") or (oct_mode == "std") out_alpha = 0.0 if self.last else oct_alpha h_out_channels = int(out_channels * (1.0 - out_alpha)) l_out_channels = out_channels - h_out_channels self.conv = OctConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, oct_alpha=oct_alpha, oct_mode=oct_mode) self.h_bn = nn.BatchNorm2d( num_features=h_out_channels, eps=bn_eps) if not self.last: self.l_bn = nn.BatchNorm2d( num_features=l_out_channels, eps=bn_eps) if self.activate: assert (activation is not None) if isfunction(activation): self.activ = activation() elif isinstance(activation, str): if activation == "relu": self.activ = nn.ReLU(inplace=True) elif activation == "relu6": self.activ = nn.ReLU6(inplace=True) else: raise NotImplementedError() else: self.activ = activation def forward(self, hx, lx=None): hx, lx = self.conv(hx, lx) hx = self.h_bn(hx) if self.activate: hx = self.activ(hx) if not self.last: lx = self.l_bn(lx) if self.activate: lx = self.activ(lx) return hx, lx def oct_conv1x1_block(in_channels, out_channels, stride=1, groups=1, bias=False, oct_alpha=0.0, oct_mode="std", bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), activate=True): """ 1x1 version of the octave convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. oct_alpha : float, default 0.0 Octave alpha coefficient. 
oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. """ return OctConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=groups, bias=bias, oct_alpha=oct_alpha, oct_mode=oct_mode, bn_eps=bn_eps, activation=activation, activate=activate) def oct_conv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False, oct_alpha=0.0, oct_mode="std", bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True)), activate=True): """ 3x3 version of the octave convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. """ return OctConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, oct_alpha=oct_alpha, oct_mode=oct_mode, bn_eps=bn_eps, activation=activation, activate=activate) class OctResBlock(nn.Module): """ Simple Oct-ResNet block for residual path in Oct-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. """ def __init__(self, in_channels, out_channels, stride, oct_alpha=0.0, oct_mode="std"): super(OctResBlock, self).__init__() self.conv1 = oct_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, oct_alpha=oct_alpha, oct_mode=oct_mode) self.conv2 = oct_conv3x3_block( in_channels=out_channels, out_channels=out_channels, oct_alpha=oct_alpha, oct_mode=("std" if oct_mode == "last" else (oct_mode if oct_mode != "first" else "norm")), activation=None, activate=False) def forward(self, hx, lx=None): hx, lx = self.conv1(hx, lx) hx, lx = self.conv2(hx, lx) return hx, lx class OctResBottleneck(nn.Module): """ Oct-ResNet bottleneck block for residual path in Oct-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer. 
oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, oct_alpha=0.0, oct_mode="std", conv1_stride=False, bottleneck_factor=4): super(OctResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = oct_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1), oct_alpha=oct_alpha, oct_mode=(oct_mode if oct_mode != "last" else "norm")) self.conv2 = oct_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), padding=padding, dilation=dilation, oct_alpha=oct_alpha, oct_mode=(oct_mode if oct_mode != "first" else "norm")) self.conv3 = oct_conv1x1_block( in_channels=mid_channels, out_channels=out_channels, oct_alpha=oct_alpha, oct_mode=("std" if oct_mode == "last" else (oct_mode if oct_mode != "first" else "norm")), activation=None, activate=False) def forward(self, hx, lx=None): hx, lx = self.conv1(hx, lx) hx, lx = self.conv2(hx, lx) hx, lx = self.conv3(hx, lx) return hx, lx class OctResUnit(nn.Module): """ Oct-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. oct_alpha : float, default 0.0 Octave alpha coefficient. oct_mode : str, default 'std' Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, oct_alpha=0.0, oct_mode="std", bottleneck=True, conv1_stride=False): super(OctResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) or \ ((oct_mode == "first") and (oct_alpha != 0.0)) if bottleneck: self.body = OctResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, oct_alpha=oct_alpha, oct_mode=oct_mode, conv1_stride=conv1_stride) else: self.body = OctResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, oct_alpha=oct_alpha, oct_mode=oct_mode) if self.resize_identity: self.identity_conv = oct_conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, oct_alpha=oct_alpha, oct_mode=oct_mode, activation=None, activate=False) self.activ = nn.ReLU(inplace=True) def forward(self, hx, lx=None): if self.resize_identity: h_identity, l_identity = self.identity_conv(hx, lx) else: h_identity, l_identity = hx, lx hx, lx = self.body(hx, lx) hx = hx + h_identity hx = self.activ(hx) if lx is not None: lx = lx + l_identity lx = self.activ(lx) return hx, lx class OctResNet(nn.Module): """ Oct-ResNet model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. oct_alpha : float, default 0.5 Octave alpha coefficient. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, oct_alpha=0.5, in_channels=3, in_size=(224, 224), num_classes=1000): super(OctResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=1) self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 if (i == 0) and (j == 0): oct_mode = "first" elif (i == len(channels) - 1) and (j == 0): oct_mode = "last" elif (i == len(channels) - 1) and (j != 0): oct_mode = "std" else: oct_mode = "norm" stage.add_module("unit{}".format(j + 1), OctResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, oct_alpha=oct_alpha, oct_mode=oct_mode, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_octresnet(blocks, bottleneck=None, conv1_stride=True, oct_alpha=0.5, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Oct-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. oct_alpha : float, default 0.5 Octave alpha coefficient. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported Oct-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = OctResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, oct_alpha=oct_alpha, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def octresnet10_ad2(**kwargs): """ Oct-ResNet-10 (alpha=1/2) model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_octresnet(blocks=10, oct_alpha=0.5, model_name="octresnet10_ad2", **kwargs) def octresnet50b_ad2(**kwargs): """ Oct-ResNet-50b (alpha=1/2) model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_octresnet(blocks=50, conv1_stride=False, oct_alpha=0.5, model_name="octresnet50b_ad2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ octresnet10_ad2, octresnet50b_ad2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != octresnet10_ad2 or weight_count == 5423016) assert (model != octresnet50b_ad2 or weight_count == 25557032) x = torch.randn(14, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, 1000)) if __name__ == "__main__": _test()
27,931
32.612515
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/prnet.py
""" PRNet for AFLW2000-3D, implemented in PyTorch. Original paper: 'Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,' https://arxiv.org/abs/1803.07835. """ __all__ = ['PRNet', 'prnet'] import os import torch.nn as nn from .common import ConvBlock, DeconvBlock, conv1x1, conv1x1_block, NormActivation def conv4x4_block(in_channels, out_channels, stride=1, padding=(1, 2, 1, 2), dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 4x4 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default (1, 2, 1, 2) Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def deconv4x4_block(in_channels, out_channels, stride=1, padding=3, ext_padding=(2, 1, 2, 1), out_padding=0, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 4x4 version of the standard deconvolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default (2, 1, 2, 1) Padding value for deconvolution layer. ext_padding : tuple/list of 4 int, default None Extra padding value for deconvolution layer. out_padding : int or tuple/list of 2 int Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return DeconvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=stride, padding=padding, ext_padding=ext_padding, out_padding=out_padding, dilation=dilation, groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class PRResBottleneck(nn.Module): """ PRNet specific bottleneck block for residual path in residual unit unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for the second convolution layer in bottleneck. bn_eps : float Small float added to variance in Batch norm. bottleneck_factor : int, default 2 Bottleneck factor. 
""" def __init__(self, in_channels, out_channels, stride, padding, bn_eps, bottleneck_factor=2): super(PRResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps) self.conv2 = conv4x4_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, padding=padding, bn_eps=bn_eps) self.conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class PRResUnit(nn.Module): """ PRNet specific ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for the second convolution layer in bottleneck. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, padding, bn_eps, stride): super(PRResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride) self.body = PRResBottleneck( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, stride=stride, padding=padding) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.norm_activ(x) return x class PROutputBlock(nn.Module): """ PRNet specific output block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(PROutputBlock, self).__init__() self.conv1 = deconv4x4_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps) self.conv2 = deconv4x4_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps) self.conv3 = deconv4x4_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=nn.Sigmoid()) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class PRNet(nn.Module): """ PRNet model from 'Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,' https://arxiv.org/abs/1803.07835. Parameters: ---------- channels : list of list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. num_classes : int, default 3 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bn_eps=1e-5, in_channels=3, in_size=(256, 256), num_classes=3): super(PRNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv4x4_block( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) in_channels = init_block_channels encoder = nn.Sequential() for i, channels_per_stage in enumerate(channels[0]): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 padding = (1, 2, 1, 2) if (stride == 1) else 1 stage.add_module("unit{}".format(j + 1), PRResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, bn_eps=bn_eps)) in_channels = out_channels encoder.add_module("stage{}".format(i + 1), stage) self.features.add_module("encoder", encoder) decoder = nn.Sequential() for i, channels_per_stage in enumerate(channels[1]): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 padding = 3 if (stride == 1) else 1 ext_padding = (2, 1, 2, 1) if (stride == 1) else None stage.add_module("unit{}".format(j + 1), deconv4x4_block( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, ext_padding=ext_padding, bn_eps=bn_eps)) in_channels = out_channels decoder.add_module("stage{}".format(i + 1), stage) self.features.add_module("decoder", decoder) self.output = PROutputBlock( in_channels=in_channels, out_channels=num_classes, bn_eps=bn_eps) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) return x def get_prnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PRNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 16 enc_channels = [[32, 32], [64, 64], [128, 128], [256, 256], [512, 512]] dec_channels = [[512], [256, 256, 256], [128, 128, 128], [64, 64, 64], [32, 32], [16, 16]] channels = [enc_channels, dec_channels] net = PRNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def prnet(**kwargs): """ PRNet model for AFLW2000-3D from 'Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,' https://arxiv.org/abs/1803.07835. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_prnet(model_name="prnet", bn_eps=1e-3, **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ prnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != prnet or weight_count == 13353618) x = torch.randn(1, 3, 256, 256) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 3, 256, 256)) if __name__ == "__main__": _test()
13,924
30.362613
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/pfpcnet.py
""" PFPCNet for 3D face reconstruction, implemented in PyTorch. Original paper: 'Production-Level Facial Performance Capture Using Deep Convolutional Neural Networks,' https://arxiv.org/abs/1609.06536. """ __all__ = ['PFPCNet', 'pfpcnet'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block class PFPCNet(nn.Module): """ PFPCNet model from 'Production-Level Facial Performance Capture Using Deep Convolutional Neural Networks,' https://arxiv.org/abs/1609.06536. Parameters: ---------- channels : list of list of int Number of output channels for each unit. pca_size : int Number of PCA coefficients (number of blendshapes). use_bn : bool, default False Whether to use BatchNorm layers. in_channels : int, default 1 Number of input channels. in_size : tuple of two ints, default (320, 240) Spatial size of the expected input image. vertices : int, default 5023 Number of 3D geometry vertices. """ def __init__(self, channels, pca_size, use_bn=True, in_channels=1, in_size=(320, 240), vertices=5023): super(PFPCNet, self).__init__() self.in_size = in_size self.vertices = vertices self.encoder = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if j == 0 else 1 stage.add_module("unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bn=use_bn, stride=stride)) in_channels = out_channels self.encoder.add_module("stage{}".format(i + 1), stage) self.decoder = nn.Sequential() self.decoder.add_module("dropout", nn.Dropout(p=0.2)) self.decoder.add_module("fc1", nn.Linear( in_features=(in_channels * 5 * 4), out_features=pca_size)) self.decoder.add_module("fc2", nn.Linear( in_features=pca_size, out_features=(3 * vertices))) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.encoder(x) x = x.view(x.size(0), -1) x = self.decoder(x) x = x.view(x.size(0), -1, 3) return x def get_pfpcnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PFPCNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ layers = [2, 2, 2, 2, 2, 2] channels_per_layers = [64, 96, 144, 216, 324, 486] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] pca_size = 120 net = PFPCNet( channels=channels, pca_size=pca_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def pfpcnet(**kwargs): """ PFPCNet model from 'Production-Level Facial Performance Capture Using Deep Convolutional Neural Networks,' https://arxiv.org/abs/1609.06536. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pfpcnet(model_name="pfpcnet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ pfpcnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != pfpcnet or weight_count == 9299329) batch = 4 in_channels = 1 vertices = 5023 x = torch.randn(batch, in_channels, 320, 240) y = net(x) y.sum().backward() assert (tuple(y.size()) == (batch, vertices, 3)) if __name__ == "__main__": _test()
5,314
28.859551
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/espcnet.py
""" ESPNet-C for image segmentation, implemented in PyTorch. Original paper: 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. """ __all__ = ['ESPCNet', 'espcnet_cityscapes', 'ESPBlock'] import os import torch import torch.nn as nn from .common import NormActivation, conv1x1, conv3x3, conv3x3_block, DualPathSequential, InterpolationBlock class HierarchicalConcurrent(nn.Sequential): """ A container for hierarchical concatenation of modules on the base of the sequential container. Parameters: ---------- exclude_first : bool, default False Whether to exclude the first branch in the intermediate sum. axis : int, default 1 The axis on which to concatenate the outputs. """ def __init__(self, exclude_first=False, axis=1): super(HierarchicalConcurrent, self).__init__() self.exclude_first = exclude_first self.axis = axis def forward(self, x): out = [] y_prev = None for i, module in enumerate(self._modules.values()): y = module(x) if y_prev is not None: y += y_prev out.append(y) if (not self.exclude_first) or (i > 0): y_prev = y out = torch.cat(tuple(out), dim=self.axis) return out class ESPBlock(nn.Module): """ ESPNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether to downsample image. residual : bool Whether to use residual connection. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, downsample, residual, bn_eps): super(ESPBlock, self).__init__() self.residual = residual dilations = [1, 2, 4, 8, 16] num_branches = len(dilations) mid_channels = out_channels // num_branches extra_mid_channels = out_channels - (num_branches - 1) * mid_channels if downsample: self.reduce_conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=2) else: self.reduce_conv = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.branches = HierarchicalConcurrent(exclude_first=True) for i in range(num_branches): out_channels_i = extra_mid_channels if i == 0 else mid_channels self.branches.add_module("branch{}".format(i + 1), conv3x3( in_channels=mid_channels, out_channels=out_channels_i, padding=dilations[i], dilation=dilations[i])) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) def forward(self, x): y = self.reduce_conv(x) y = self.branches(y) if self.residual: y = y + x y = self.norm_activ(y) return y class ESPUnit(nn.Module): """ ESPNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, layers, bn_eps): super(ESPUnit, self).__init__() mid_channels = out_channels // 2 self.down = ESPBlock( in_channels=in_channels, out_channels=mid_channels, downsample=True, residual=False, bn_eps=bn_eps) self.blocks = nn.Sequential() for i in range(layers - 1): self.blocks.add_module("block{}".format(i + 1), ESPBlock( in_channels=mid_channels, out_channels=mid_channels, downsample=False, residual=True, bn_eps=bn_eps)) def forward(self, x): x = self.down(x) y = self.blocks(x) x = torch.cat((y, x), dim=1) # NB: This differs from the original implementation. return x class ESPStage(nn.Module): """ ESPNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. 
y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. layers : int Number of layers in the unit. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, x_channels, y_in_channels, y_out_channels, layers, bn_eps): super(ESPStage, self).__init__() self.use_x = (x_channels > 0) self.use_unit = (layers > 0) if self.use_x: self.x_down = nn.AvgPool2d( kernel_size=3, stride=2, padding=1) if self.use_unit: self.unit = ESPUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), layers=layers, bn_eps=bn_eps) self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(y_out_channels))) def forward(self, y, x=None): if self.use_unit: y = self.unit(y) if self.use_x: x = self.x_down(x) y = torch.cat((y, x), dim=1) y = self.norm_activ(y) return y, x class ESPCNet(nn.Module): """ ESPNet-C model from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. cut_x : list of int Whether to concatenate with x-branch for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, layers, channels, init_block_channels, cut_x, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ESPCNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, bn_eps=bn_eps, activation=(lambda: nn.PReLU(init_block_channels)))) y_in_channels = init_block_channels for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)): self.features.add_module("stage{}".format(i + 1), ESPStage( x_channels=in_channels if cut_x[i] == 1 else 0, y_in_channels=y_in_channels, y_out_channels=y_out_channels, layers=layers_i, bn_eps=bn_eps)) y_in_channels = y_out_channels self.head = conv1x1( in_channels=y_in_channels, out_channels=num_classes) self.up = InterpolationBlock( scale_factor=8, align_corners=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] y = self.features(x, x) y = self.head(y) y = self.up(y, size=in_size) return y def get_espcnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ESPNet-C model with specific parameters. 
Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 16 layers = [0, 6, 4] channels = [19, 131, 256] cut_x = [1, 1, 0] bn_eps = 1e-3 net = ESPCNet( layers=layers, channels=channels, init_block_channels=init_block_channels, cut_x=cut_x, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def espcnet_cityscapes(num_classes=19, **kwargs): """ ESPNet-C model for Cityscapes from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_espcnet(num_classes=num_classes, model_name="espcnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ espcnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != espcnet_cityscapes or weight_count == 210889) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
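

# A minimal sketch of how ESPBlock splits its output width across the five dilated
# branches: the first branch absorbs the remainder, so the hierarchical concatenation
# restores exactly out_channels channels. The value 64 is the per-block width in
# stage 2 of espcnet_cityscapes ((131 - 3) // 2); the helper name is illustrative.
def _esp_split_demo():
    out_channels = 64
    dilations = [1, 2, 4, 8, 16]
    num_branches = len(dilations)
    mid_channels = out_channels // num_branches
    extra_mid_channels = out_channels - (num_branches - 1) * mid_channels
    branch_widths = [extra_mid_channels] + [mid_channels] * (num_branches - 1)
    assert branch_widths == [16, 12, 12, 12, 12]
    assert sum(branch_widths) == out_channels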
12,104
29.2625
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/alexnet.py
""" AlexNet for ImageNet-1K, implemented in PyTorch. Original paper: 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. """ __all__ = ['AlexNet', 'alexnet', 'alexnetb'] import os import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import ConvBlock class AlexConv(ConvBlock): """ AlexNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. use_lrn : bool Whether to use LRN layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_lrn): super(AlexConv, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True, use_bn=False) self.use_lrn = use_lrn def forward(self, x): x = super(AlexConv, self).forward(x) if self.use_lrn: x = F.local_response_norm(x, size=5, k=2.0) return x class AlexDense(nn.Module): """ AlexNet specific dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(AlexDense, self).__init__() self.fc = nn.Linear( in_features=in_channels, out_features=out_channels) self.activ = nn.ReLU(inplace=True) self.dropout = nn.Dropout(p=0.5) def forward(self, x): x = self.fc(x) x = self.activ(x) x = self.dropout(x) return x class AlexOutputBlock(nn.Module): """ AlexNet specific output block. Parameters: ---------- in_channels : int Number of input channels. classes : int Number of classification classes. """ def __init__(self, in_channels, classes): super(AlexOutputBlock, self).__init__() mid_channels = 4096 self.fc1 = AlexDense( in_channels=in_channels, out_channels=mid_channels) self.fc2 = AlexDense( in_channels=mid_channels, out_channels=mid_channels) self.fc3 = nn.Linear( in_features=mid_channels, out_features=classes) def forward(self, x): x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x class AlexNet(nn.Module): """ AlexNet model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Parameters: ---------- channels : list of list of int Number of output channels for each unit. kernel_sizes : list of list of int Convolution window sizes for each unit. strides : list of list of int or tuple/list of 2 int Strides of the convolution for each unit. paddings : list of list of int or tuple/list of 2 int Padding value for convolution layer for each unit. use_lrn : bool Whether to use LRN layer. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, kernel_sizes, strides, paddings, use_lrn, in_channels=3, in_size=(224, 224), num_classes=1000): super(AlexNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): use_lrn_i = use_lrn and (i in [0, 1]) stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), AlexConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_sizes[i][j], stride=strides[i][j], padding=paddings[i][j], use_lrn=use_lrn_i)) in_channels = out_channels stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=3, stride=2, padding=0, ceil_mode=True)) self.features.add_module("stage{}".format(i + 1), stage) self.output = AlexOutputBlock( in_channels=(in_channels * 6 * 6), classes=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_alexnet(version="a", model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create AlexNet model with specific parameters. Parameters: ---------- version : str, default 'a' Version of AlexNet ('a' or 'b'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "a": channels = [[96], [256], [384, 384, 256]] kernel_sizes = [[11], [5], [3, 3, 3]] strides = [[4], [1], [1, 1, 1]] paddings = [[0], [2], [1, 1, 1]] use_lrn = True elif version == "b": channels = [[64], [192], [384, 256, 256]] kernel_sizes = [[11], [5], [3, 3, 3]] strides = [[4], [1], [1, 1, 1]] paddings = [[2], [2], [1, 1, 1]] use_lrn = False else: raise ValueError("Unsupported AlexNet version {}".format(version)) net = AlexNet( channels=channels, kernel_sizes=kernel_sizes, strides=strides, paddings=paddings, use_lrn=use_lrn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def alexnet(**kwargs): """ AlexNet model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_alexnet(model_name="alexnet", **kwargs) def alexnetb(**kwargs): """ AlexNet-b model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Non-standard version. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_alexnet(version="b", model_name="alexnetb", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ alexnet, alexnetb, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != alexnet or weight_count == 62378344) assert (model != alexnetb or weight_count == 61100840) x = torch.randn(1, 3, 224, 224) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,244
27.890625
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mobilenet_cub.py
""" MobileNet & FD-MobileNet for CUB-200-2011, implemented in torch. Original papers: - 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. - 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. """ __all__ = ['mobilenet_w1_cub', 'mobilenet_w3d4_cub', 'mobilenet_wd2_cub', 'mobilenet_wd4_cub', 'fdmobilenet_w1_cub', 'fdmobilenet_w3d4_cub', 'fdmobilenet_wd2_cub', 'fdmobilenet_wd4_cub'] from .mobilenet import get_mobilenet from .fdmobilenet import get_fdmobilenet def mobilenet_w1_cub(num_classes=200, **kwargs): """ 1.0 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(num_classes=num_classes, width_scale=1.0, model_name="mobilenet_w1_cub", **kwargs) def mobilenet_w3d4_cub(num_classes=200, **kwargs): """ 0.75 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(num_classes=num_classes, width_scale=0.75, model_name="mobilenet_w3d4_cub", **kwargs) def mobilenet_wd2_cub(num_classes=200, **kwargs): """ 0.5 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(num_classes=num_classes, width_scale=0.5, model_name="mobilenet_wd2_cub", **kwargs) def mobilenet_wd4_cub(num_classes=200, **kwargs): """ 0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(num_classes=num_classes, width_scale=0.25, model_name="mobilenet_wd4_cub", **kwargs) def fdmobilenet_w1_cub(num_classes=200, **kwargs): """ FD-MobileNet 1.0x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_fdmobilenet(num_classes=num_classes, width_scale=1.0, model_name="fdmobilenet_w1_cub", **kwargs) def fdmobilenet_w3d4_cub(num_classes=200, **kwargs): """ FD-MobileNet 0.75x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(num_classes=num_classes, width_scale=0.75, model_name="fdmobilenet_w3d4_cub", **kwargs) def fdmobilenet_wd2_cub(num_classes=200, **kwargs): """ FD-MobileNet 0.5x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(num_classes=num_classes, width_scale=0.5, model_name="fdmobilenet_wd2_cub", **kwargs) def fdmobilenet_wd4_cub(num_classes=200, **kwargs): """ FD-MobileNet 0.25x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(num_classes=num_classes, width_scale=0.25, model_name="fdmobilenet_wd4_cub", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mobilenet_w1_cub, mobilenet_w3d4_cub, mobilenet_wd2_cub, mobilenet_wd4_cub, fdmobilenet_w1_cub, fdmobilenet_w3d4_cub, fdmobilenet_wd2_cub, fdmobilenet_wd4_cub, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenet_w1_cub or weight_count == 3411976) assert (model != mobilenet_w3d4_cub or weight_count == 1970360) assert (model != mobilenet_wd2_cub or weight_count == 921192) assert (model != mobilenet_wd4_cub or weight_count == 264472) assert (model != fdmobilenet_w1_cub or weight_count == 2081288) assert (model != fdmobilenet_w3d4_cub or weight_count == 1218104) assert (model != fdmobilenet_wd2_cub or weight_count == 583528) assert (model != fdmobilenet_wd4_cub or weight_count == 177560) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 200)) if __name__ == "__main__": _test()
7,269
34.990099
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/wrn.py
""" WRN for ImageNet-1K, implemented in PyTorch. Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. """ __all__ = ['WRN', 'wrn50_2'] import os import torch.nn as nn import torch.nn.init as init class WRNConv(nn.Module): """ WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. activate : bool Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activate): super(WRNConv, self).__init__() self.activate = activate self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) if self.activate: x = self.activ(x) return x def wrn_conv1x1(in_channels, out_channels, stride, activate): """ 1x1 version of the WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. activate : bool Whether activate the convolution block. """ return WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, activate=activate) def wrn_conv3x3(in_channels, out_channels, stride, activate): """ 3x3 version of the WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. activate : bool Whether activate the convolution block. """ return WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, activate=activate) class WRNBottleneck(nn.Module): """ WRN bottleneck block for residual path in WRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. width_factor : float Wide scale factor for width of layers. """ def __init__(self, in_channels, out_channels, stride, width_factor): super(WRNBottleneck, self).__init__() mid_channels = int(round(out_channels // 4 * width_factor)) self.conv1 = wrn_conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=1, activate=True) self.conv2 = wrn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activate=True) self.conv3 = wrn_conv1x1( in_channels=mid_channels, out_channels=out_channels, stride=1, activate=False) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class WRNUnit(nn.Module): """ WRN unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. width_factor : float Wide scale factor for width of layers. 
""" def __init__(self, in_channels, out_channels, stride, width_factor): super(WRNUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = WRNBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, width_factor=width_factor) if self.resize_identity: self.identity_conv = wrn_conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class WRNInitBlock(nn.Module): """ WRN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(WRNInitBlock, self).__init__() self.conv = WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, padding=3, activate=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class WRN(nn.Module): """ WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. width_factor : float Wide scale factor for width of layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, width_factor, in_channels=3, in_size=(224, 224), num_classes=1000): super(WRN, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", WRNInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), WRNUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, width_factor=width_factor)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_wrn(blocks, width_factor, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create WRN model with specific parameters. Parameters: ---------- blocks : int Number of blocks. width_factor : float Wide scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = WRN( channels=channels, init_block_channels=init_block_channels, width_factor=width_factor, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def wrn50_2(**kwargs): """ WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn(blocks=50, width_factor=2.0, model_name="wrn50_2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ wrn50_2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn50_2 or weight_count == 68849128) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
11,401
26.474699
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/inceptionv3.py
""" InceptionV3 for ImageNet-1K, implemented in PyTorch. Original paper: 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. """ __all__ = ['InceptionV3', 'inceptionv3', 'MaxPoolBranch', 'AvgPoolBranch', 'Conv1x1Branch', 'ConvSeqBranch'] import os import torch import torch.nn as nn from .common import ConvBlock, conv1x1_block, conv3x3_block, Concurrent class MaxPoolBranch(nn.Module): """ Inception specific max pooling branch block. """ def __init__(self): super(MaxPoolBranch, self).__init__() self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) def forward(self, x): x = self.pool(x) return x class AvgPoolBranch(nn.Module): """ Inception specific average pooling branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. count_include_pad : bool, default True Whether to include the zero-padding in the averaging calculation. """ def __init__(self, in_channels, out_channels, bn_eps, count_include_pad=True): super(AvgPoolBranch, self).__init__() self.pool = nn.AvgPool2d( kernel_size=3, stride=1, padding=1, count_include_pad=count_include_pad) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class Conv1x1Branch(nn.Module): """ Inception specific convolutional 1x1 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(Conv1x1Branch, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps) def forward(self, x): x = self.conv(x) return x class ConvSeqBranch(nn.Module): """ Inception specific convolutional sequence branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, bn_eps): super(ConvSeqBranch, self).__init__() assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = nn.Sequential() for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=strides, padding=padding, bn_eps=bn_eps)) in_channels = out_channels def forward(self, x): x = self.conv_list(x) return x class ConvSeq3x3Branch(nn.Module): """ InceptionV3 specific convolutional sequence branch block with splitting by 3x3. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. 
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, bn_eps): super(ConvSeq3x3Branch, self).__init__() self.conv_list = nn.Sequential() for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=strides, padding=padding, bn_eps=bn_eps)) in_channels = out_channels self.conv1x3 = ConvBlock( in_channels=in_channels, out_channels=in_channels, kernel_size=(1, 3), stride=1, padding=(0, 1), bn_eps=bn_eps) self.conv3x1 = ConvBlock( in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 1), stride=1, padding=(1, 0), bn_eps=bn_eps) def forward(self, x): x = self.conv_list(x) y1 = self.conv1x3(x) y2 = self.conv3x1(x) x = torch.cat((y1, y2), dim=1) return x class InceptionAUnit(nn.Module): """ InceptionV3 type Inception-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(InceptionAUnit, self).__init__() assert (out_channels > 224) pool_out_channels = out_channels - 224 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=64, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(48, 64), kernel_size_list=(1, 5), strides_list=(1, 1), padding_list=(0, 2), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=pool_out_channels, bn_eps=bn_eps)) def forward(self, x): x = self.branches(x) return x class ReductionAUnit(nn.Module): """ InceptionV3 type Reduction-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(ReductionAUnit, self).__init__() assert (in_channels == 288) assert (out_channels == 768) self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), bn_eps=bn_eps)) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptionBUnit(nn.Module): """ InceptionV3 type Inception-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
mid_channels : int Number of output channels in the 7x7 branches. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, mid_channels, bn_eps): super(InceptionBUnit, self).__init__() assert (in_channels == 768) assert (out_channels == 768) self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(mid_channels, mid_channels, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(mid_channels, mid_channels, mid_channels, mid_channels, 192), kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)), strides_list=(1, 1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps)) def forward(self, x): x = self.branches(x) return x class ReductionBUnit(nn.Module): """ InceptionV3 type Reduction-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(ReductionBUnit, self).__init__() assert (in_channels == 768) assert (out_channels == 1280) self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 320), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 192, 192, 192), kernel_size_list=(1, (1, 7), (7, 1), 3), strides_list=(1, 1, 1, 2), padding_list=(0, (0, 3), (3, 0), 0), bn_eps=bn_eps)) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptionCUnit(nn.Module): """ InceptionV3 type Inception-C unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(InceptionCUnit, self).__init__() assert (out_channels == 2048) self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=320, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeq3x3Branch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(1,), strides_list=(1,), padding_list=(0,), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeq3x3Branch( in_channels=in_channels, out_channels_list=(448, 384), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps)) def forward(self, x): x = self.branches(x) return x class InceptInitBlock(nn.Module): """ InceptionV3 specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, out_channels, bn_eps): super(InceptInitBlock, self).__init__() assert (out_channels == 192) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0, bn_eps=bn_eps) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, stride=1, padding=0, bn_eps=bn_eps) self.conv3 = conv3x3_block( in_channels=32, out_channels=64, stride=1, padding=1, bn_eps=bn_eps) self.pool1 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) self.conv4 = conv1x1_block( in_channels=64, out_channels=80, stride=1, padding=0, bn_eps=bn_eps) self.conv5 = conv3x3_block( in_channels=80, out_channels=192, stride=1, padding=0, bn_eps=bn_eps) self.pool2 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool1(x) x = self.conv4(x) x = self.conv5(x) x = self.pool2(x) return x class InceptionV3(nn.Module): """ InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. b_mid_channels : list of int Number of middle channels for each Inception-B unit. dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, b_mid_channels, bn_eps=1e-5, dropout_rate=0.5, in_channels=3, in_size=(299, 299), num_classes=1000): super(InceptionV3, self).__init__() self.in_size = in_size self.num_classes = num_classes normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = nn.Sequential() self.features.add_module("init_block", InceptInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] else: unit = normal_units[i] if unit == InceptionBUnit: stage.add_module("unit{}".format(j + 1), unit( in_channels=in_channels, out_channels=out_channels, mid_channels=b_mid_channels[j - 1], bn_eps=bn_eps)) else: stage.add_module("unit{}".format(j + 1), unit( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_inceptionv3(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create 
InceptionV3 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 192 channels = [[256, 288, 288], [768, 768, 768, 768, 768], [1280, 2048, 2048]] b_mid_channels = [128, 160, 160, 192] net = InceptionV3( channels=channels, init_block_channels=init_block_channels, b_mid_channels=b_mid_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def inceptionv3(**kwargs): """ InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_inceptionv3(model_name="inceptionv3", bn_eps=1e-3, **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ inceptionv3, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != inceptionv3 or weight_count == 23834568) x = torch.randn(1, 3, 299, 299) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
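

# A minimal sketch of how b_mid_channels pairs with the Inception-B units: stage 2
# is [768] * 5, its first unit (j == 0) is Reduction-A, so the four Inception-B
# units read b_mid_channels[j - 1]. The helper name is illustrative only.
def _b_mid_channels_demo():
    stage2 = [768, 768, 768, 768, 768]
    b_mid_channels = [128, 160, 160, 192]
    pairs = [(j, b_mid_channels[j - 1]) for j in range(1, len(stage2))]
    assert pairs == [(1, 128), (2, 160), (3, 160), (4, 192)]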
21,472
29.807747
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fdmobilenet.py
""" FD-MobileNet for ImageNet-1K, implemented in PyTorch. Original paper: 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. """ __all__ = ['fdmobilenet_w1', 'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4', 'get_fdmobilenet'] import os from .mobilenet import MobileNet def get_fdmobilenet(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create FD-MobileNet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]] first_stage_stride = True if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] net = MobileNet( channels=channels, first_stage_stride=first_stage_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fdmobilenet_w1(**kwargs): """ FD-MobileNet 1.0x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=1.0, model_name="fdmobilenet_w1", **kwargs) def fdmobilenet_w3d4(**kwargs): """ FD-MobileNet 0.75x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=0.75, model_name="fdmobilenet_w3d4", **kwargs) def fdmobilenet_wd2(**kwargs): """ FD-MobileNet 0.5x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=0.5, model_name="fdmobilenet_wd2", **kwargs) def fdmobilenet_wd4(**kwargs): """ FD-MobileNet 0.25x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_fdmobilenet(width_scale=0.25, model_name="fdmobilenet_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ fdmobilenet_w1, fdmobilenet_w3d4, fdmobilenet_wd2, fdmobilenet_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fdmobilenet_w1 or weight_count == 2901288) assert (model != fdmobilenet_w3d4 or weight_count == 1833304) assert (model != fdmobilenet_wd2 or weight_count == 993928) assert (model != fdmobilenet_wd4 or weight_count == 383160) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
4,771
29.394904
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/_inceptionresnetv1_.py
__all__ = ['inceptionresnetv1']

import torch
from torch import nn
from common import conv1x1, ConvBlock, conv1x1_block, conv3x3_block, Concurrent


class MaxPoolBranch(nn.Module):
    """
    InceptionResNetV1 specific max pooling branch block.
    """
    def __init__(self):
        super(MaxPoolBranch, self).__init__()
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=0)

    def forward(self, x):
        x = self.pool(x)
        return x


class Conv1x1Branch(nn.Module):
    """
    InceptionResNetV1 specific convolutional 1x1 branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps):
        super(Conv1x1Branch, self).__init__()
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_eps=bn_eps)

    def forward(self, x):
        x = self.conv(x)
        return x


class ConvSeqBranch(nn.Module):
    """
    InceptionResNetV1 specific convolutional sequence branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps):
        super(ConvSeqBranch, self).__init__()
        assert (len(out_channels_list) == len(kernel_size_list))
        assert (len(out_channels_list) == len(strides_list))
        assert (len(out_channels_list) == len(padding_list))

        self.conv_list = nn.Sequential()
        for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                out_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.add_module("conv{}".format(i + 1), ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=strides,
                padding=padding,
                bn_eps=bn_eps))
            in_channels = out_channels

    def forward(self, x):
        x = self.conv_list(x)
        return x


class InceptionAUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-A unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(InceptionAUnit, self).__init__()
        self.scale = 0.17
        in_channels = 256

        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=32,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(32, 32),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(32, 32, 32),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps))
        self.conv = conv1x1(
            in_channels=96,
            out_channels=in_channels,
            bias=True)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x


class ReductionAUnit(nn.Module):
    """
    InceptionResNetV1 type Reduction-A unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(ReductionAUnit, self).__init__()
        in_channels = 256

        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192, 256),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", MaxPoolBranch())

    def forward(self, x):
        x = self.branches(x)
        return x


class InceptionBUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-B unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(InceptionBUnit, self).__init__()
        self.scale = 0.10
        in_channels = 896

        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=128,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(128, 128, 128),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps))
        self.conv = conv1x1(
            in_channels=256,
            out_channels=in_channels,
            bias=True)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x


class ReductionBUnit(nn.Module):
    """
    InceptionResNetV1 type Reduction-B unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(ReductionBUnit, self).__init__()
        in_channels = 896

        self.branches = Concurrent()
        self.branches.add_module("branch1", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 384),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch3", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256, 256),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps))
        self.branches.add_module("branch4", MaxPoolBranch())

    def forward(self, x):
        x = self.branches(x)
        return x


class InceptionCUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-C unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether to activate the convolution block.
    """
    def __init__(self,
                 bn_eps,
                 scale=0.2,
                 activate=True):
        super(InceptionCUnit, self).__init__()
        self.activate = activate
        self.scale = scale
        in_channels = 1792

        self.branches = Concurrent()
        self.branches.add_module("branch1", Conv1x1Branch(
            in_channels=in_channels,
            out_channels=192,
            bn_eps=bn_eps))
        self.branches.add_module("branch2", ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192, 192),
            kernel_size_list=(1, (1, 3), (3, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 1), (1, 0)),
            bn_eps=bn_eps))
        self.conv = conv1x1(
            in_channels=384,
            out_channels=in_channels,
            bias=True)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x


class InceptInitBlock(nn.Module):
    """
    InceptionResNetV1 specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(InceptInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            stride=2,
            padding=0,
            bn_eps=bn_eps)
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            stride=1,
            padding=1,
            bn_eps=bn_eps)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            padding=0)
        self.conv4 = conv1x1_block(
            in_channels=64,
            out_channels=80,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv5 = conv3x3_block(
            in_channels=80,
            out_channels=192,
            stride=1,
            padding=0,
            bn_eps=bn_eps)
        self.conv6 = conv3x3_block(
            in_channels=192,
            out_channels=256,
            stride=2,
            padding=0,
            bn_eps=bn_eps)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        return x


class InceptHead(nn.Module):
    """
    InceptionResNetV1 specific classification block.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Fraction of the input units to drop.
    num_classes : int
        Number of classification classes.
    """
    def __init__(self,
                 bn_eps,
                 dropout_rate,
                 num_classes):
        super(InceptHead, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        if self.use_dropout:
            self.dropout = nn.Dropout(dropout_rate)
        self.fc1 = nn.Linear(1792, 512, bias=False)
        self.bn = nn.BatchNorm1d(512, eps=bn_eps)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        if self.use_dropout:
            x = self.dropout(x)
        x = self.fc1(x)
        x = self.bn(x)
        x = self.fc2(x)
        return x


class InceptionResNetV1(nn.Module):
    """Inception Resnet V1 model with optional loading of pretrained weights.

    Model parameters can be loaded based on pretraining on the VGGFace2 or CASIA-Webface datasets. Pretrained
    state_dicts are automatically downloaded on model instantiation if requested and cached in the torch cache.
    Subsequent instantiations use the cache rather than redownloading.

    Keyword Arguments:
        num_classes {int} -- Number of output classes. If 'pretrained' is set and num_classes not equal to that used
            for the pretrained model, the final linear layer will be randomly initialized. (default: {None})
        dropout_prob {float} -- Dropout probability. (default: {0.6})
    """
    def __init__(self,
                 dropout_prob=0.6,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 num_classes=1000):
        super(InceptionResNetV1, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        layers = [5, 11, 7]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = nn.Sequential()
        self.features.add_module("init_block", InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps))
        for i, layers_per_stage in enumerate(layers):
            stage = nn.Sequential()
            for j in range(layers_per_stage):
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                else:
                    unit = normal_units[i]
                if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                    stage.add_module("unit{}".format(j + 1), unit(bn_eps=bn_eps, scale=1.0, activate=False))
                else:
                    stage.add_module("unit{}".format(j + 1), unit(bn_eps=bn_eps))
            self.features.add_module("stage{}".format(i + 1), stage)
        self.avgpool_1a = nn.AdaptiveAvgPool2d(1)

        self.output = InceptHead(
            bn_eps=bn_eps,
            dropout_rate=dropout_prob,
            num_classes=num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool_1a(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x


def inceptionresnetv1(pretrained=False, **kwargs):
    return InceptionResNetV1(bn_eps=1e-3, **kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import torch

    pretrained = False

    models = [
        inceptionresnetv1,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionresnetv1 or weight_count == 23995624)

        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
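# --- Hedged usage sketch (not part of the original file) ---
# InceptionResNetV1 is commonly used as a face-embedding backbone; the sketch
# below pulls the 512-d bottleneck (output of `fc1` + `bn` in InceptHead)
# instead of the class logits. All attribute names (`features`, `avgpool_1a`,
# `output.fc1`, `output.bn`) come from the classes defined above; treating the
# bottleneck as an embedding is an assumption, not part of this file's API.
def _embedding_sketch():
    net = inceptionresnetv1()
    net.eval()
    x = torch.randn(1, 3, 299, 299)
    with torch.no_grad():
        f = net.features(x)
        f = net.avgpool_1a(f).view(f.size(0), -1)
        emb = net.output.bn(net.output.fc1(f))  # 512-d embedding
    assert emb.shape == (1, 512)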
15,341
28.334608
108
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/oth_vit.py
from functools import partial
import torch
import torch.nn as nn


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(
            in_features=dim,
            out_features=(dim * 3),
            bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(
            in_features=dim,
            out_features=dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q.matmul(k.transpose(-2, -1))) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn.matmul(v)).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class MLP(nn.Module):
    def __init__(self,
                 channels,
                 mid_channels,
                 dropout_rate):
        super().__init__()
        self.fc1 = nn.Linear(channels, mid_channels)
        self.activ = nn.GELU()
        self.fc2 = nn.Linear(mid_channels, channels)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activ(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x


class Block(nn.Module):
    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio,
                 qkv_bias,
                 qk_scale,
                 dropout_rate,
                 att_dropout_rate,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        mlp_hidden_dim = int(dim * mlp_ratio)

        self.norm1 = norm_layer(dim)
        self.att = Attention(
            dim=dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=att_dropout_rate,
            proj_drop=dropout_rate)
        self.norm2 = norm_layer(dim)
        self.mlp = MLP(
            channels=dim,
            mid_channels=mlp_hidden_dim,
            dropout_rate=dropout_rate)

    def forward(self, x):
        x = x + self.att(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x


class ImagePatchEmbedding(nn.Module):
    def __init__(self,
                 in_channels,
                 embedding_dim,
                 patch_size):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=embedding_dim,
            kernel_size=patch_size,
            stride=patch_size)

    def forward(self, x):
        x = self.conv(x)
        x = x.flatten(start_dim=2)
        x = x.transpose(1, 2)
        return x


class VisionTransformer(nn.Module):
    """
    Args:
        in_channels (int): number of input channels
        num_classes (int): number of classes for classification head
        embed_dim (int): embedding dimension
        depth (int): depth of transformer
        num_heads (int): number of attention heads
        mlp_ratio (int): ratio of mlp hidden dim to embedding dim
        qkv_bias (bool): enable bias for qkv if True
        qk_scale (float): override default qk scale of head_dim ** -0.5 if set
        dropout_rate (float): dropout rate
        att_dropout_rate (float): attention dropout rate
        norm_layer: (nn.Module): normalization layer
    """
    def __init__(self,
                 in_size=(224, 224),
                 patch_size=(16, 16),
                 in_channels=3,
                 num_classes=1000,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 dropout_rate=0.,
                 att_dropout_rate=0.,
                 norm_layer=None):
        super().__init__()
        # assert (representation_size is None)
        self.num_classes = num_classes
        self.num_features = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = ImagePatchEmbedding(
            in_channels=in_channels,
            embedding_dim=embed_dim,
            patch_size=patch_size)
        num_patches = (in_size[1] // patch_size[1]) * (in_size[0] // patch_size[0])

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=dropout_rate)

        self.blocks = nn.Sequential()
        for i in range(depth):
            self.blocks.add_module("block{}".format(i + 1), Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                dropout_rate=dropout_rate,
                att_dropout_rate=att_dropout_rate,
                norm_layer=norm_layer))
        self.norm = norm_layer(embed_dim)

        self.head = nn.Linear(
            in_features=self.num_features,
            out_features=num_classes)

    def forward(self, x):
        x = self.patch_embed(x)
        B = x.shape[0]
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        x = self.blocks(x)
        x = self.norm(x)[:, 0]
        x = self.head(x)
        return x


def _create_vision_transformer(variant, pretrained=False, **kwargs):
    net = VisionTransformer(**kwargs)
    return net


def vit_small_patch16_224(pretrained=False, **kwargs):
    """ My custom 'small' ViT model. Depth=8, heads=8, mlp_ratio=3."""
    model_kwargs = dict(
        embed_dim=768,
        depth=8,
        num_heads=8,
        mlp_ratio=3.,
        qkv_bias=False,
        norm_layer=nn.LayerNorm,
        **kwargs)
    if pretrained:
        # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
        model_kwargs.setdefault('qk_scale', 768 ** -0.5)
    model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
    return model


def vit_base_patch16_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
    return model


def vit_large_patch16_224(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs)
    model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs)
    return model


def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
    """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    model_kwargs = dict(embed_dim=192, depth=12, num_heads=3, **kwargs)
    model = _create_vision_transformer('vit_deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs)
    return model


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    pretrained = False
    in_size = (224, 224)
    classes = 1000

    models = [
        vit_small_patch16_224,
        vit_base_patch16_224,
        vit_large_patch16_224,
        vit_deit_tiny_patch16_224,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != vit_small_patch16_224 or weight_count == 48754408)
        assert (model != vit_base_patch16_224 or weight_count == 86567656)
        assert (model != vit_large_patch16_224 or weight_count == 304326632)
        assert (model != vit_deit_tiny_patch16_224 or weight_count == 5717416)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (batch, classes))


if __name__ == "__main__":
    _test()
9,413
31.129693
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/_espnet.py
""" ESPNet for image segmentation, implemented in PyTorch. Original paper: 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. """ __all__ = ['ESPNet', 'espnet_cityscapes'] import os import torch import torch.nn as nn from common import conv1x1, conv3x3_block, NormActivation, DeconvBlock from espcnet import ESPCNet, ESPBlock class ESPFinalBlock(nn.Module): """ ESPNet final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(ESPFinalBlock, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) self.deconv = nn.ConvTranspose2d( in_channels=out_channels, out_channels=out_channels, kernel_size=2, stride=2, padding=0, output_padding=0, bias=False) def forward(self, x): x = self.conv(x) x = self.deconv(x) return x class ESPNet(ESPCNet): """ ESPNet model from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. cut_x : list of int Whether to concatenate with x-branch for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, layers, channels, init_block_channels, cut_x, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(ESPNet, self).__init__( layers=layers, channels=channels, init_block_channels=init_block_channels, cut_x=cut_x, bn_eps=bn_eps, aux=aux, fixed_size=fixed_size, in_channels=in_channels, in_size=in_size, num_classes=num_classes) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.skip1 = nn.BatchNorm2d( num_features=num_classes, eps=bn_eps) self.skip2 = conv1x1( in_channels=channels[1], out_channels=num_classes) self.up1 = nn.Sequential(nn.ConvTranspose2d( in_channels=num_classes, out_channels=num_classes, kernel_size=2, stride=2, padding=0, output_padding=0, bias=False)) self.up2 = nn.Sequential() self.up2.add_module("block1", NormActivation( in_channels=(2 * num_classes), bn_eps=bn_eps, activation=(lambda: nn.PReLU(2 * num_classes)))) self.up2.add_module("block2", ESPBlock( in_channels=(2 * num_classes), out_channels=num_classes, downsample=False, residual=False, bn_eps=bn_eps)) self.up2.add_module("block3", DeconvBlock( in_channels=num_classes, out_channels=num_classes, kernel_size=2, stride=2, padding=0, bn_eps=bn_eps, activation=(lambda: nn.PReLU(num_classes)))) self.decoder_head = ESPFinalBlock( in_channels=(channels[0] + num_classes), out_channels=num_classes, bn_eps=bn_eps) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): y0 = self.features.init_block(x) y1, x = self.features.stage1(y0, x) y2, x = self.features.stage2(y1, x) y3, x = self.features.stage3(y2, x) yh = self.head(y3) v1 = self.skip1(yh) z1 = self.up1(v1) v2 = self.skip2(y2) z2 = torch.cat((v2, z1), dim=1) z2 = self.up2(z2) z = torch.cat((z2, y1), dim=1) z = self.decoder_head(z) return z def get_espnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ESPNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 16 layers = [0, 3, 4] channels = [19, 131, 256] cut_x = [1, 1, 0] bn_eps = 1e-3 net = ESPNet( layers=layers, channels=channels, init_block_channels=init_block_channels, cut_x=cut_x, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def espnet_cityscapes(num_classes=19, **kwargs): """ ESPNet model for Cityscapes from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,' https://arxiv.org/abs/1803.06815. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_espnet(num_classes=num_classes, model_name="espnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ espnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != espnet_cityscapes or weight_count == 201542) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
8,299
29.181818
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/oth_espnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class CBR(nn.Module):
    '''
    This class defines the convolution layer with batch normalization and PReLU activation
    '''
    def __init__(self, nIn, nOut, kSize, stride=1):
        '''
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        '''
        super().__init__()
        padding = int((kSize - 1)/2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=1e-03)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        '''
        :param input: input feature map
        :return: transformed feature map
        '''
        output = self.conv(input)
        output = self.bn(output)
        output = self.act(output)
        return output


class BR(nn.Module):
    '''
    This class groups the batch normalization and PReLU activation
    '''
    def __init__(self, nOut):
        '''
        :param nOut: output feature maps
        '''
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut, eps=1e-03)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        '''
        :param input: input feature map
        :return: normalized and thresholded feature map
        '''
        output = self.bn(input)
        output = self.act(output)
        return output


class CB(nn.Module):
    '''
    This class groups the convolution and batch normalization
    '''
    def __init__(self, nIn, nOut, kSize, stride=1):
        '''
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        '''
        super().__init__()
        padding = int((kSize - 1)/2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=1e-03)

    def forward(self, input):
        '''
        :param input: input feature map
        :return: transformed feature map
        '''
        output = self.conv(input)
        output = self.bn(output)
        return output


class C(nn.Module):
    '''
    This class is for a convolutional layer.
    '''
    def __init__(self, nIn, nOut, kSize, stride=1):
        '''
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        '''
        super().__init__()
        padding = int((kSize - 1)/2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)

    def forward(self, input):
        '''
        :param input: input feature map
        :return: transformed feature map
        '''
        output = self.conv(input)
        return output


class CDilated(nn.Module):
    '''
    This class defines the dilated convolution, which can maintain feature map size
    '''
    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        '''
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        :param d: optional dilation rate
        '''
        super().__init__()
        padding = int((kSize - 1)/2) * d
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False,
                              dilation=d)

    def forward(self, input):
        '''
        :param input: input feature map
        :return: transformed feature map
        '''
        output = self.conv(input)
        return output


class DownSamplerB(nn.Module):
    def __init__(self, nIn, nOut):
        super().__init__()
        n = int(nOut/5)
        n1 = nOut - 4*n
        self.c1 = C(nIn, n, 3, 2)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, eps=1e-3)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        output1 = self.c1(input)
        d1 = self.d1(output1)
        d2 = self.d2(output1)
        d4 = self.d4(output1)
        d8 = self.d8(output1)
        d16 = self.d16(output1)

        # Using hierarchical feature fusion (HFF) to ease the gridding artifacts which is introduced
        # by the large effective receptive field of the ESP module
        add1 = d2
        add2 = add1 + d4
        add3 = add2 + d8
        add4 = add3 + d16

        combine = torch.cat([d1, add1, add2, add3, add4], 1)
        # combine_in_out = input + combine  # shortcut path
        output = self.bn(combine)
        output = self.act(output)
        return output


# ESP block
class DilatedParllelResidualBlockB(nn.Module):
    '''
    This class defines the ESP block, which is based on the following principle
        Reduce ---> Split ---> Transform --> Merge
    '''
    def __init__(self, nIn, nOut, add=True):
        '''
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param add: if true, add a residual connection through identity operation. You can use projection too as
                in the ResNet paper, but we avoid using it if the dimensions are not the same because we do not
                want to increase the module complexity
        '''
        super().__init__()
        n = int(nOut/5)  # K=5,
        n1 = nOut - 4*n  # (N-(K-1)INT(N/K)) for dilation rate of 2^0, for producing an output feature map of channel=nOut
        self.c1 = C(nIn, n, 1, 1)  # the point-wise convolutions with 1x1 help in reducing the computation, channel=c

        # K=5, dilation rate: 2^{k-1},k={1,2,3,...,K}
        self.d1 = CDilated(n, n1, 3, 1, 1)  # dilation rate of 2^0
        self.d2 = CDilated(n, n, 3, 1, 2)  # dilation rate of 2^1
        self.d4 = CDilated(n, n, 3, 1, 4)  # dilation rate of 2^2
        self.d8 = CDilated(n, n, 3, 1, 8)  # dilation rate of 2^3
        self.d16 = CDilated(n, n, 3, 1, 16)  # dilation rate of 2^4
        self.bn = BR(nOut)
        self.add = add

    def forward(self, input):
        '''
        :param input: input feature map
        :return: transformed feature map
        '''
        # reduce
        output1 = self.c1(input)

        # split and transform
        d1 = self.d1(output1)
        d2 = self.d2(output1)
        d4 = self.d4(output1)
        d8 = self.d8(output1)
        d16 = self.d16(output1)

        # Using hierarchical feature fusion (HFF) to ease the gridding artifacts which is introduced
        # by the large effective receptive field of the ESP module
        add1 = d2
        add2 = add1 + d4
        add3 = add2 + d8
        add4 = add3 + d16

        # merge
        combine = torch.cat([d1, add1, add2, add3, add4], 1)

        # if residual version
        if self.add:
            combine = input + combine
        output = self.bn(combine)
        return output


class InputProjectionA(nn.Module):
    '''
    This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x 512 x 3 and spatial dimensions of feature map size are 56x56xF, then
    this class will generate an output of 56x56x3, for input reinforcement, which establishes a direct link between
    the input image and encoding stage, improving the flow of information.
    '''
    def __init__(self, samplingTimes):
        '''
        :param samplingTimes: The rate at which you want to down-sample the image
        '''
        super().__init__()
        self.pool = nn.ModuleList()
        for i in range(0, samplingTimes):
            # pyramid-based approach for down-sampling
            self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, input):
        '''
        :param input: Input RGB Image
        :return: down-sampled image (pyramid-based approach)
        '''
        for pool in self.pool:
            input = pool(input)
        return input


class ESPNet_Encoder(nn.Module):
    '''
    This class defines the ESPNet-C network in the paper
    '''
    def __init__(self, num_classes=19, p=5, q=3):
        '''
        :param num_classes: number of classes in the dataset. Default is 19 for Cityscapes
        :param p: depth multiplier (number of ESP blocks at the 1/4 resolution level)
        :param q: depth multiplier (number of ESP blocks at the 1/8 resolution level)
        '''
        super().__init__()
        self.level1 = CBR(3, 16, 3, 2)  # feature map size divided 2, 1/2
        self.sample1 = InputProjectionA(1)  # down-sample for input reinforcement, factor=2
        self.sample2 = InputProjectionA(2)  # down-sample for input reinforcement, factor=4

        self.b1 = BR(16 + 3)
        self.level2_0 = DownSamplerB(16 + 3, 64)  # Downsample Block, feature map size divided 2, 1/4

        self.level2 = nn.ModuleList()
        for i in range(0, p):
            self.level2.append(DilatedParllelResidualBlockB(64, 64))  # ESP block
        self.b2 = BR(128 + 3)

        self.level3_0 = DownSamplerB(128 + 3, 128)  # Downsample Block, feature map size divided 2, 1/8
        self.level3 = nn.ModuleList()
        for i in range(0, q):
            self.level3.append(DilatedParllelResidualBlockB(128, 128))  # ESP block
        self.b3 = BR(256)

        self.classifier = C(256, num_classes, 1, 1)

    def forward(self, input):
        '''
        :param input: Receives the input RGB image
        :return: the transformed feature map with spatial dimensions 1/8th of the input image
        '''
        output0 = self.level1(input)
        inp1 = self.sample1(input)
        inp2 = self.sample2(input)

        output0_cat = self.b1(torch.cat([output0, inp1], 1))
        output1_0 = self.level2_0(output0_cat)  # down-sampled

        for i, layer in enumerate(self.level2):
            if i == 0:
                output1 = layer(output1_0)
            else:
                output1 = layer(output1)

        output1_cat = self.b2(torch.cat([output1, output1_0, inp2], 1))

        output2_0 = self.level3_0(output1_cat)  # down-sampled
        for i, layer in enumerate(self.level3):
            if i == 0:
                output2 = layer(output2_0)
            else:
                output2 = layer(output2)

        output2_cat = self.b3(torch.cat([output2_0, output2], 1))

        classifier = self.classifier(output2_cat)
        # return classifier
        out = F.interpolate(classifier, size=input.size()[2:], mode='bilinear', align_corners=False)  # upsample score map, factor=8
        return out


class ESPNet(nn.Module):
    '''
    This class defines the ESPNet network
    '''
    def __init__(self, num_classes=19, p=2, q=3, encoderFile=None):
        '''
        :param num_classes: number of classes in the dataset. Default is 19 for Cityscapes
        :param p: depth multiplier (number of ESP blocks at the 1/4 resolution level)
        :param q: depth multiplier (number of ESP blocks at the 1/8 resolution level)
        :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNet-C and then attached
                the RUM-based lightweight decoder. See paper for more details.
        '''
        super().__init__()
        self.encoder = ESPNet_Encoder(num_classes, p, q)
        if encoderFile is not None:
            self.encoder.load_state_dict(torch.load(encoderFile))
            print('Encoder loaded!')
        # load the encoder modules
        self.en_modules = []
        for i, m in enumerate(self.encoder.children()):
            self.en_modules.append(m)

        # light-weight decoder
        self.level3_C = C(128 + 3, num_classes, 1, 1)
        self.br = nn.BatchNorm2d(num_classes, eps=1e-03)
        self.conv = CBR(19 + num_classes, num_classes, 3, 1)

        self.up_l3 = nn.Sequential(nn.ConvTranspose2d(num_classes, num_classes, 2, stride=2, padding=0,
                                                      output_padding=0, bias=False))
        self.combine_l2_l3 = nn.Sequential(BR(2 * num_classes),
                                           DilatedParllelResidualBlockB(2 * num_classes, num_classes, add=False))
        self.up_l2 = nn.Sequential(nn.ConvTranspose2d(num_classes, num_classes, 2, stride=2, padding=0,
                                                      output_padding=0, bias=False),
                                   BR(num_classes))

        self.classifier = nn.ConvTranspose2d(num_classes, num_classes, 2, stride=2, padding=0, output_padding=0,
                                             bias=False)

    def forward(self, input):
        '''
        :param input: RGB image
        :return: transformed feature map
        '''
        output0 = self.en_modules[0](input)
        inp1 = self.en_modules[1](input)
        inp2 = self.en_modules[2](input)

        output0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))
        output1_0 = self.en_modules[4](output0_cat)  # down-sampled

        for i, layer in enumerate(self.en_modules[5]):
            if i == 0:
                output1 = layer(output1_0)
            else:
                output1 = layer(output1)

        output1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))

        output2_0 = self.en_modules[7](output1_cat)  # down-sampled
        for i, layer in enumerate(self.en_modules[8]):
            if i == 0:
                output2 = layer(output2_0)
            else:
                output2 = layer(output2)

        output2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))  # concatenate for feature map width expansion

        output2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))  # RUM

        output1_C = self.level3_C(output1_cat)  # project to C-dimensional space
        comb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))  # RUM

        concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))

        classifier = self.classifier(concat_features)
        return classifier


def oth_espnet_cityscapes(num_classes=19, pretrained=False, **kwargs):
    return ESPNet(num_classes=num_classes, **kwargs)


def oth_espnetc_cityscapes(num_classes=19, pretrained=False, **kwargs):
    return ESPNet_Encoder(num_classes=num_classes, **kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    pretrained = False
    # fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    models = [
        oth_espnet_cityscapes,
        # oth_espnetc_cityscapes,
    ]

    for model in models:
        # from torchsummary import summary
        # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # net = ESPNet(num_classes=19).to(device)
        # summary(net, (3, 256, 512))

        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != oth_espnet_cityscapes or weight_count == 201542)
        assert (model != oth_espnetc_cityscapes or weight_count == 210889)

        batch = 4
        x = torch.randn(batch, 3, in_size[0], in_size[1])
        y = net(x)
        # y.sum().backward()
        assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))


if __name__ == "__main__":
    _test()
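# --- Hedged illustration of hierarchical feature fusion (not part of the original file) ---
# The ESP blocks above sum the dilated branches progressively (d2, d2+d4,
# d2+d4+d8, ...) before concatenating, which is what suppresses gridding
# artifacts. This toy cascade reproduces the same arithmetic on plain tensors,
# independent of the modules defined above.
def _hff_sketch():
    d1, d2, d4, d8, d16 = [torch.randn(1, 4, 8, 8) for _ in range(5)]
    add1 = d2
    add2 = add1 + d4
    add3 = add2 + d8
    add4 = add3 + d16
    combined = torch.cat([d1, add1, add2, add3, add4], dim=1)
    assert combined.shape == (1, 20, 8, 8)  # 5 branches of 4 channels each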
15,567
33.90583
151
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/oth_quartznet.py
__all__ = ['oth_quartznet5x5_en_ls', 'oth_quartznet15x5_en', 'oth_quartznet15x5_en_nr', 'oth_quartznet15x5_fr',
           'oth_quartznet15x5_de', 'oth_quartznet15x5_it', 'oth_quartznet15x5_es', 'oth_quartznet15x5_ca',
           'oth_quartznet15x5_pl', 'oth_quartznet15x5_ru', 'oth_jasperdr10x5_en', 'oth_jasperdr10x5_en_nr',
           'oth_quartznet15x5_ru34']

import torch.nn as nn
# import torch.nn.functional as F
# import editdistance


class CtcDecoder(object):
    """
    CTC decoder (to decode a sequence of labels to words).

    Parameters:
    ----------
    vocabulary : list of str
        Vocabulary of the dataset.
    """
    def __init__(self,
                 vocabulary):
        super().__init__()
        self.blank_id = len(vocabulary)
        self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])

    def __call__(self, predictions):
        """
        Decode a sequence of labels to words.

        Parameters:
        ----------
        predictions : np.array of int or list of list of int
            Tensor with predicted labels.

        Returns:
        -------
        list of str
            Words.
        """
        hypotheses = []
        for prediction in predictions:
            decoded_prediction = []
            previous = self.blank_id
            for p in prediction:
                if (p != previous or previous == self.blank_id) and p != self.blank_id:
                    decoded_prediction.append(p)
                previous = p
            hypothesis = "".join([self.labels_map[c] for c in decoded_prediction])
            hypotheses.append(hypothesis)
        return hypotheses


# class WER(object):
#     """
#     Word Error Rate (WER).
#
#     Parameters:
#     ----------
#     vocabulary : list of str
#         Vocabulary of the dataset.
#     """
#     def __init__(self,
#                  vocabulary):
#         super().__init__()
#         self.blank_id = len(vocabulary)
#         self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])
#
#         self.scores = 0
#         self.words = 0
#
#     def update(self,
#                hypotheses,
#                references):
#         words = 0.0
#         scores = 0.0
#
#         for h, r in zip(hypotheses, references):
#             h_list = h.split()
#             r_list = r.split()
#             words += len(r_list)
#             scores += editdistance.eval(h_list, r_list)
#
#         self.scores += scores
#         self.words += words
#
#     def compute(self):
#         return float(self.scores) / self.words


class QuartzNet(nn.Module):
    def __init__(self, raw_net, num_classes):
        super(QuartzNet, self).__init__()
        self.in_size = None
        self.num_classes = num_classes

        self.preprocessor = raw_net.preprocessor
        self.encoder = raw_net.encoder
        self.decoder = raw_net.decoder
        # self.vocabulary = raw_net.cfg.decoder.params.vocabulary

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x, lens):
        from nemo.core import typecheck
        with typecheck.disable_checks():
            x, lens = self.encoder(x, lens)
            x = self.decoder(x)
        return x, lens


# path_pref = "../../../../../imgclsmob_data/nemo/"
path_pref = "../imgclsmob_data/nemo/"


def oth_quartznet5x5_en_ls(pretrained=False, num_classes=29, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "QuartzNet5x5LS-En_08ecf82a.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_en(pretrained=False, num_classes=29, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "QuartzNet15x5Base-En_3dbcc2ff.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_en_nr(pretrained=False, num_classes=29, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "QuartzNet15x5NR-En_b05e34f3.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_fr(pretrained=False, num_classes=43, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_fr_quartznet15x5_a3fdb084.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_de(pretrained=False, num_classes=32, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_de_quartznet15x5_6ae5d87d.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_it(pretrained=False, num_classes=39, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_it_quartznet15x5_0f6e4537.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_es(pretrained=False, num_classes=36, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_es_quartznet15x5_f2083912.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_ca(pretrained=False, num_classes=39, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_ca_quartznet15x5_b1a4fa3c.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_pl(pretrained=False, num_classes=34, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_pl_quartznet15x5_9dd685f7.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_ru(pretrained=False, num_classes=35, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_ru_quartznet15x5_88a3e5aa.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_jasperdr10x5_en(pretrained=False, num_classes=29, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "Jasper10x5Dr-En_2b94c9d1.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_jasperdr10x5_en_nr(pretrained=False, num_classes=29, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "stt_en_jasper10x5dr_0d5ebc6c.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def oth_quartznet15x5_ru34(pretrained=False, num_classes=34, **kwargs):
    from nemo.collections.asr.models import EncDecCTCModel
    quartznet_nemo_path = path_pref + "QuartzNet15x5_golos_1a63a2d8.nemo"
    raw_net = EncDecCTCModel.restore_from(quartznet_nemo_path)
    net = QuartzNet(raw_net=raw_net, num_classes=num_classes)
    net = net.cpu()
    return net#, raw_net


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import numpy as np
    import torch

    pretrained = True
    audio_features = 64

    models = [
        # oth_quartznet5x5_en_ls,
        # oth_quartznet15x5_en,
        # oth_quartznet15x5_en_nr,
        # oth_quartznet15x5_fr,
        # oth_quartznet15x5_de,
        # oth_quartznet15x5_it,
        # oth_quartznet15x5_es,
        # oth_quartznet15x5_ca,
        # oth_quartznet15x5_pl,
        # oth_quartznet15x5_ru,
        # oth_jasperdr10x5_en,
        # oth_jasperdr10x5_en_nr,
        oth_quartznet15x5_ru34,
    ]

    for model in models:
        net = model(
            pretrained=pretrained)
        num_classes = net.num_classes

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != oth_quartznet5x5_en_ls or weight_count == 6713181)
        assert (model != oth_quartznet15x5_en or weight_count == 18924381)
        assert (model != oth_quartznet15x5_en_nr or weight_count == 18924381)
        assert (model != oth_quartznet15x5_fr or weight_count == 18938731)
        assert (model != oth_quartznet15x5_de or weight_count == 18927456)
        assert (model != oth_quartznet15x5_it or weight_count == 18934631)
        assert (model != oth_quartznet15x5_es or weight_count == 18931556)
        assert (model != oth_quartznet15x5_ca or weight_count == 18934631)
        assert (model != oth_quartznet15x5_pl or weight_count == 18929506)
        assert (model != oth_quartznet15x5_ru or weight_count == 18930531)
        assert (model != oth_jasperdr10x5_en or weight_count == 332632349)
        assert (model != oth_jasperdr10x5_en_nr or weight_count == 332632349)
        assert (model != oth_quartznet15x5_ru34 or weight_count == 18929506)

        batch = 3
        seq_len = np.random.randint(60, 150, batch)
        seq_len_max = seq_len.max() + 2
        x = torch.randn(batch, audio_features, seq_len_max)
        x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device)
        # x_len = torch.full((batch, 1), seq_len - 2).to(dtype=torch.long, device=x.device)

        y, y_len = net(x, x_len)
        # y.sum().backward()

        assert (y.size()[0] == batch)
        assert (y.size()[1] in [seq_len_max // 2, seq_len_max // 2 + 1])
        assert (y.size()[2] == num_classes)


if __name__ == "__main__":
    _test()
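# --- Hedged usage sketch for CtcDecoder (not part of the original file) ---
# Greedy CTC decoding collapses repeated labels and drops blanks. With the toy
# vocabulary ["a", "b", "c"] the blank id is len(vocabulary) = 3, so the label
# sequence [0, 0, 3, 1, 1, 3, 2] decodes to "abc". CtcDecoder is pure Python,
# so this check needs no NeMo checkpoint.
def _ctc_decoder_sketch():
    decoder = CtcDecoder(vocabulary=["a", "b", "c"])
    hypotheses = decoder([[0, 0, 3, 1, 1, 3, 2]])
    assert hypotheses == ["abc"]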
11,139
34.253165
111
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/others/oth_inception_resnet_v1.py
__all__ = ['oth_inceptionresnetv1']

import torch
from torch import nn


class BasicConv2d(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
        super().__init__()
        self.conv = nn.Conv2d(
            in_planes, out_planes,
            kernel_size=kernel_size, stride=stride,
            padding=padding, bias=False
        )  # verify bias false
        self.bn = nn.BatchNorm2d(
            out_planes,
            eps=0.001,  # value found in tensorflow
            momentum=0.1,  # default pytorch value
            affine=True
        )
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class Block35(nn.Module):
    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = scale

        self.branch0 = BasicConv2d(256, 32, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
        )
        self.branch2 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
        )
        self.conv2d = nn.Conv2d(96, 256, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        out = self.relu(out)
        return out


class Block17(nn.Module):
    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = scale

        self.branch0 = BasicConv2d(896, 128, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(896, 128, kernel_size=1, stride=1),
            BasicConv2d(128, 128, kernel_size=(1,7), stride=1, padding=(0,3)),
            BasicConv2d(128, 128, kernel_size=(7,1), stride=1, padding=(3,0))
        )
        self.conv2d = nn.Conv2d(256, 896, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        out = self.relu(out)
        return out


class Block8(nn.Module):
    def __init__(self, scale=1.0, noReLU=False):
        super().__init__()
        self.scale = scale
        self.noReLU = noReLU

        self.branch0 = BasicConv2d(1792, 192, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(1792, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 192, kernel_size=(1,3), stride=1, padding=(0,1)),
            BasicConv2d(192, 192, kernel_size=(3,1), stride=1, padding=(1,0))
        )
        self.conv2d = nn.Conv2d(384, 1792, kernel_size=1, stride=1)
        if not self.noReLU:
            self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        if not self.noReLU:
            out = self.relu(out)
        return out


class Mixed_6a(nn.Module):
    def __init__(self):
        super().__init__()
        self.branch0 = BasicConv2d(256, 384, kernel_size=3, stride=2)
        self.branch1 = nn.Sequential(
            BasicConv2d(256, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1),
            BasicConv2d(192, 256, kernel_size=3, stride=2)
        )
        self.branch2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        return out


class Mixed_7a(nn.Module):
    def __init__(self):
        super().__init__()
        self.branch0 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2)
        )
        self.branch1 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2)
        )
        self.branch2 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2)
        )
        self.branch3 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        out = torch.cat((x0, x1, x2, x3), 1)
        return out


class InceptionResnetV1(nn.Module):
    """Inception Resnet V1 model with optional loading of pretrained weights.

    Model parameters can be loaded based on pretraining on the VGGFace2 or CASIA-Webface datasets. Pretrained
    state_dicts are automatically downloaded on model instantiation if requested and cached in the torch cache.
    Subsequent instantiations use the cache rather than redownloading.

    Keyword Arguments:
        num_classes {int} -- Number of output classes. If 'pretrained' is set and num_classes not equal to that used
            for the pretrained model, the final linear layer will be randomly initialized. (default: {None})
        dropout_prob {float} -- Dropout probability. (default: {0.6})
    """
    def __init__(self,
                 num_classes=1000,
                 dropout_prob=0.6):
        super().__init__()

        # Set simple attributes
        self.num_classes = num_classes

        # Define layers
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.conv2d_4b = BasicConv2d(192, 256, kernel_size=3, stride=2)
        self.repeat_1 = nn.Sequential(
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
        )
        self.mixed_6a = Mixed_6a()
        self.repeat_2 = nn.Sequential(
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
        )
        self.mixed_7a = Mixed_7a()
        self.repeat_3 = nn.Sequential(
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
        )
        self.block8 = Block8(noReLU=True)
        self.avgpool_1a = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(dropout_prob)
        self.last_linear = nn.Linear(1792, 512, bias=False)
        self.last_bn = nn.BatchNorm1d(512, eps=0.001, momentum=0.1, affine=True)
        self.logits = nn.Linear(512, self.num_classes)

    def forward(self, x):
        x = self.conv2d_1a(x)
        x = self.conv2d_2a(x)
        x = self.conv2d_2b(x)
        x = self.maxpool_3a(x)
        x = self.conv2d_3b(x)
        x = self.conv2d_4a(x)
        x = self.conv2d_4b(x)
        x = self.repeat_1(x)
        x = self.mixed_6a(x)
        x = self.repeat_2(x)
        x = self.mixed_7a(x)
        x = self.repeat_3(x)
        x = self.block8(x)
        x = self.avgpool_1a(x)
        x = self.dropout(x)
        x = self.last_linear(x.view(x.shape[0], -1))
        x = self.last_bn(x)
        x = self.logits(x)
        return x


def oth_inceptionresnetv1(pretrained=False, **kwargs):
    return InceptionResnetV1(**kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import torch

    pretrained = False

    models = [
        oth_inceptionresnetv1,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != oth_inceptionresnetv1 or weight_count == 23995624)

        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
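# --- Hedged side note on the factorized convolutions in Block17 (not part of the original file) ---
# Block17 replaces a 7x7 convolution by a 1x7 followed by a 7x1, which keeps
# the 7x7 receptive field while cutting the weight count from 49*C_in*C_out to
# 14*C_in*C_out. The arithmetic below checks the 3.5x ratio for C = 128.
def _factorization_sketch():
    c = 128
    full = 7 * 7 * c * c
    factorized = (1 * 7 * c * c) + (7 * 1 * c * c)
    assert factorized * 3.5 == full  # 14 vs 49 weights per channel pair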
9,240
28.336508
97
py
imgclsmob
imgclsmob-master/keras_/setup.py
from setuptools import setup, find_packages
from os import path
from io import open

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='kerascv',
    version='0.0.40',
    description='Image classification models for Keras',
    license='MIT',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/osmr/imgclsmob',
    author='Oleg Sémery',
    author_email='osemery@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Image Recognition',
    ],
    keywords='machine-learning deep-learning neuralnetwork image-classification keras keras-mxnet imagenet vgg resnet '
             'resnext senet densenet darknet squeezenet squeezenext shufflenet menet mobilenet igcv3 mnasnet '
             'efficientnet',
    packages=find_packages(exclude=['others', '*.others', 'others.*', '*.others.*']),
    include_package_data=True,
    install_requires=['h5py'],
)
1,280
36.676471
119
py
imgclsmob
imgclsmob-master/keras_/utils.py
import math
import logging
import os

from keras import backend as K
from keras.utils.np_utils import to_categorical
import mxnet as mx

from keras_.kerascv.model_provider import get_model


def prepare_ke_context(num_gpus,
                       batch_size):
    batch_size *= max(1, num_gpus)
    return batch_size


def get_data_rec(rec_train,
                 rec_train_idx,
                 rec_val,
                 rec_val_idx,
                 batch_size,
                 num_workers,
                 input_image_size=(224, 224),
                 resize_inv_factor=0.875,
                 only_val=False):
    assert (resize_inv_factor > 0.0)
    if isinstance(input_image_size, int):
        input_image_size = (input_image_size, input_image_size)

    rec_train = os.path.expanduser(rec_train)
    rec_train_idx = os.path.expanduser(rec_train_idx)
    rec_val = os.path.expanduser(rec_val)
    rec_val_idx = os.path.expanduser(rec_val_idx)

    jitter_param = 0.4
    lighting_param = 0.1
    mean_rgb = [123.68, 116.779, 103.939]
    std_rgb = [58.393, 57.12, 57.375]
    data_shape = (3,) + input_image_size
    resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))

    if not only_val:
        train_data = mx.io.ImageRecordIter(
            path_imgrec=rec_train,
            path_imgidx=rec_train_idx,
            preprocess_threads=num_workers,
            shuffle=True,
            batch_size=batch_size,

            data_shape=data_shape,
            mean_r=mean_rgb[0],
            mean_g=mean_rgb[1],
            mean_b=mean_rgb[2],
            std_r=std_rgb[0],
            std_g=std_rgb[1],
            std_b=std_rgb[2],
            rand_mirror=True,
            random_resized_crop=True,
            max_aspect_ratio=(4. / 3.),
            min_aspect_ratio=(3. / 4.),
            max_random_area=1,
            min_random_area=0.08,
            brightness=jitter_param,
            saturation=jitter_param,
            contrast=jitter_param,
            pca_noise=lighting_param,
        )
    else:
        train_data = None
    val_data = mx.io.ImageRecordIter(
        path_imgrec=rec_val,
        path_imgidx=rec_val_idx,
        preprocess_threads=num_workers,
        shuffle=False,
        batch_size=batch_size,

        resize=resize_value,
        data_shape=data_shape,
        mean_r=mean_rgb[0],
        mean_g=mean_rgb[1],
        mean_b=mean_rgb[2],
        std_r=std_rgb[0],
        std_g=std_rgb[1],
        std_b=std_rgb[2],
    )
    return train_data, val_data


def get_data_generator(data_iterator,
                       num_classes):
    def get_arrays(db):
        data = db.data[0].asnumpy()
        if K.image_data_format() == "channels_last":
            data = data.transpose((0, 2, 3, 1))
        labels = to_categorical(
            y=db.label[0].asnumpy(),
            num_classes=num_classes)
        return data, labels

    while True:
        try:
            db = data_iterator.next()
        except StopIteration:
            # logging.warning("get_data exception due to end of data - resetting iterator")
            data_iterator.reset()
            db = data_iterator.next()
        finally:
            yield get_arrays(db)


def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path):
    kwargs = {"pretrained": use_pretrained}

    net = get_model(model_name, **kwargs)

    if pretrained_model_file_path:
        assert (os.path.isfile(pretrained_model_file_path))
        logging.info("Loading model: {}".format(pretrained_model_file_path))
        net.load_weights(filepath=pretrained_model_file_path)

    return net


def backend_agnostic_compile(model,
                             loss,
                             optimizer,
                             metrics,
                             num_gpus):
    keras_backend_exist = True
    try:
        K._backend
    except (NameError, AttributeError):
        keras_backend_exist = False
    if keras_backend_exist and (K._backend == "mxnet"):
        mx_ctx = ["gpu(%d)" % i for i in range(num_gpus)] if num_gpus > 0 else ["cpu()"]
        model.compile(
            loss=loss,
            optimizer=optimizer,
            metrics=metrics,
            context=mx_ctx)
    else:
        if num_gpus > 1:
            logging.info("Warning: num_gpus > 1 but not using MxNet backend")
        model.compile(
            loss=loss,
            optimizer=optimizer,
            metrics=metrics)
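# --- Hedged usage sketch (not part of the original file) ---
# `prepare_ke_context` scales the per-device batch size by the GPU count
# (minimum 1), so a per-GPU batch of 64 on 2 GPUs becomes a global batch of
# 128, while a CPU-only run (num_gpus=0) keeps the batch size unchanged.
def _context_sketch():
    assert prepare_ke_context(num_gpus=2, batch_size=64) == 128
    assert prepare_ke_context(num_gpus=0, batch_size=64) == 64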
4,497
28.592105
91
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/shufflenetv2.py
""" ShuffleNet V2 for ImageNet-1K, implemented in Keras. Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. """ __all__ = ['shufflenetv2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, maxpool2d, channel_shuffle_lambda,\ se_block, batchnorm, is_channels_first, get_channel_axis, flatten def shuffle_unit(x, in_channels, out_channels, downsample, use_se, use_residual, name="shuffle_unit"): """ ShuffleNetV2 unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether do downsample. use_se : bool Whether to use SE block. use_residual : bool Whether to use residual connection. name : str, default 'shuffle_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = out_channels // 2 if downsample: y1 = depthwise_conv3x3( x=x, channels=in_channels, strides=2, name=name + "/dw_conv4") y1 = batchnorm( x=y1, name=name + "/dw_bn4") y1 = conv1x1( x=y1, in_channels=in_channels, out_channels=mid_channels, name=name + "/expand_conv5") y1 = batchnorm( x=y1, name=name + "/expand_bn5") y1 = nn.Activation("relu", name=name + "/expand_activ5")(y1) x2 = x else: in_split2_channels = in_channels // 2 if is_channels_first(): y1 = nn.Lambda(lambda z: z[:, 0:in_split2_channels, :, :])(x) x2 = nn.Lambda(lambda z: z[:, in_split2_channels:, :, :])(x) else: y1 = nn.Lambda(lambda z: z[:, :, :, 0:in_split2_channels])(x) x2 = nn.Lambda(lambda z: z[:, :, :, in_split2_channels:])(x) y2 = conv1x1( x=x2, in_channels=(in_channels if downsample else mid_channels), out_channels=mid_channels, name=name + "/compress_conv1") y2 = batchnorm( x=y2, name=name + "/compress_bn1") y2 = nn.Activation("relu", name=name + "/compress_activ1")(y2) y2 = depthwise_conv3x3( x=y2, channels=mid_channels, strides=(2 if downsample else 1), name=name + "/dw_conv2") y2 = batchnorm( x=y2, name=name + "/dw_bn2") y2 = conv1x1( x=y2, in_channels=mid_channels, out_channels=mid_channels, name=name + "/expand_conv3") y2 = batchnorm( x=y2, name=name + "/expand_bn3") y2 = nn.Activation("relu", name=name + "/expand_activ3")(y2) if use_se: y2 = se_block( x=y2, channels=mid_channels, name=name + "/se") if use_residual and not downsample: y2 = nn.add([y2, x2], name=name + "/add") x = nn.concatenate([y1, y2], axis=get_channel_axis(), name=name + "/concat") x = channel_shuffle_lambda( channels=out_channels, groups=2, name=name + "/c_shuffle")(x) return x def shuffle_init_block(x, in_channels, out_channels, name="shuffle_init_block"): """ ShuffleNetV2 specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'shuffle_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. 
""" x = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=2, name=name + "/conv") x = maxpool2d( x=x, pool_size=3, strides=2, padding=0, ceil_mode=True, name=name + "/pool") return x def shufflenetv2(channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, in_channels=3, in_size=(224, 224), classes=1000): """ ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. use_se : bool, default False Whether to use SE block. use_residual : bool, default False Whether to use residual connections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = shuffle_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) x = shuffle_unit( x=x, in_channels=in_channels, out_channels=out_channels, downsample=downsample, use_se=use_se, use_residual=use_residual, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_shufflenetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create ShuffleNetV2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" init_block_channels = 24 final_block_channels = 1024 layers = [4, 8, 4] channels_per_layers = [116, 232, 464] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if width_scale > 1.5: final_block_channels = int(final_block_channels * width_scale) net = shufflenetv2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shufflenetv2_wd2(**kwargs): """ ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs) def shufflenetv2_w1(**kwargs): """ ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs) def shufflenetv2_w3d2(**kwargs): """ ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs) def shufflenetv2_w2(**kwargs): """ ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ shufflenetv2_wd2, shufflenetv2_w1, shufflenetv2_w3d2, shufflenetv2_w2, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenetv2_wd2 or weight_count == 1366792) assert (model != shufflenetv2_w1 or weight_count == 2278604) assert (model != shufflenetv2_w3d2 or weight_count == 4406098) assert (model != shufflenetv2_w2 or weight_count == 7601686) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,732
29.396373
115
py
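For reference, the channel shuffle applied via `channel_shuffle_lambda` in the unit above is the standard reshape-transpose-reshape trick. A minimal NumPy sketch, assuming channels-first layout; this mirrors, rather than reuses, the library implementation:

import numpy as np


def channel_shuffle_ref(x, groups):
    # Split channels into `groups` blocks, then interleave them.
    batch, channels, height, width = x.shape
    x = x.reshape(batch, groups, channels // groups, height, width)
    x = x.transpose(0, 2, 1, 3, 4)
    return x.reshape(batch, channels, height, width)


x = np.arange(8).reshape(1, 8, 1, 1)
print(channel_shuffle_ref(x, groups=2).ravel())  # [0 4 1 5 2 6 3 7]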
imgclsmob
imgclsmob-master/keras_/kerascv/models/igcv3.py
""" IGCV3 for ImageNet-1K, implemented in Keras. Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. """ __all__ = ['igcv3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle_lambda, is_channels_first, flatten def inv_res_unit(x, in_channels, out_channels, strides, expansion, name="inv_res_unit"): """ So-called 'Inverted Residual Unit' layer. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. expansion : bool Whether do expansion of channels. name : str, default 'inv_res_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ residual = (in_channels == out_channels) and (strides == 1) mid_channels = in_channels * 6 if expansion else in_channels groups = 2 if residual: identity = x x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, groups=groups, activation=None, name=name + "/conv1") x = channel_shuffle_lambda( channels=mid_channels, groups=groups, name=name + "/c_shuffle")(x) x = dwconv3x3_block( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation="relu6", name=name + "/conv2") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, groups=groups, activation=None, name=name + "/conv3") if residual: x = nn.add([x, identity], name=name + "/add") return x def igcv3(channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): """ IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = conv3x3_block( x=input, in_channels=in_channels, out_channels=init_block_channels, strides=2, activation="relu6", name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) x = inv_res_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, expansion=expansion, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, activation="relu6", name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_igcv3(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create IGCV3-D model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ init_block_channels = 32 final_block_channels = 1280 layers = [1, 4, 6, 8, 6, 6, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: def make_even(x): return x if (x % 2 == 0) else x + 1 channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels] init_block_channels = make_even(int(init_block_channels * width_scale)) if width_scale > 1.0: final_block_channels = make_even(int(final_block_channels * width_scale)) net = igcv3( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def igcv3_w1(**kwargs): """ IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs) def igcv3_w3d4(**kwargs): """ IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs) def igcv3_wd2(**kwargs): """ IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=0.5, model_name="igcv3_wd2", **kwargs) def igcv3_wd4(**kwargs): """ IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=0.25, model_name="igcv3_wd4", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ igcv3_w1, igcv3_w3d4, igcv3_wd2, igcv3_wd4, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != igcv3_w1 or weight_count == 3491688) assert (model != igcv3_w3d4 or weight_count == 2638084) assert (model != igcv3_wd2 or weight_count == 1985528) assert (model != igcv3_wd4 or weight_count == 1534020) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
9,422
29.495146
117
py
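The `reduce` expression in `get_igcv3` above packs the stage construction into one line; a sketch of the equivalent plain loop may be easier to follow (a unit with `downsample == 1` opens a new stage, otherwise the units extend the current stage):

layers = [1, 4, 6, 8, 6, 6, 1]
downsample = [0, 1, 1, 1, 0, 1, 0]
channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

channels = [[]]
for ci, li, di in zip(channels_per_layers, layers, downsample):
    if di != 0:
        channels.append([ci] * li)   # downsampling starts a new stage
    else:
        channels[-1] += [ci] * li    # otherwise extend the current stage
print(channels)
# [[16], [24]*4, [32]*6, [64]*8 + [96]*6, [160]*6 + [320]]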
imgclsmob
imgclsmob-master/keras_/kerascv/models/preresnet.py
""" PreResNet for ImageNet-1K, implemented in Keras. Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['preresnet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4', 'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34', 'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152', 'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'preres_block', 'preres_bottleneck_block', 'preres_init_block', 'preres_activation'] import os from keras import layers as nn from keras.models import Model from .common import pre_conv1x1_block, pre_conv3x3_block, conv2d, conv1x1, batchnorm, is_channels_first, flatten def preres_block(x, in_channels, out_channels, strides, name="preres_block"): """ Simple PreResNet block for residual path in PreResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. name : str, default 'preres_block' Block name. Returns: ------- tuple of two keras.backend tensor/variable/symbol Resulted tensor and preactivated input tensor. """ x, x_pre_activ = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, return_preact=True, name=name + "/conv1") x = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, name=name + "/conv2") return x, x_pre_activ def preres_bottleneck_block(x, in_channels, out_channels, strides, conv1_stride, name="preres_bottleneck_block"): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. name : str, default 'preres_bottleneck_block' Block name. Returns: ------- tuple of two keras.backend tensor/variable/symbol Resulted tensor and preactivated input tensor. """ mid_channels = out_channels // 4 x, x_pre_activ = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(strides if conv1_stride else 1), return_preact=True, name=name + "/conv1") x = pre_conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(1 if conv1_stride else strides), name=name + "/conv2") x = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, name=name + "/conv3") return x, x_pre_activ def preres_unit(x, in_channels, out_channels, strides, bottleneck, conv1_stride, name="preres_unit"): """ PreResNet unit with residual connection. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. name : str, default 'preres_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor. 
""" identity = x if bottleneck: x, x_pre_activ = preres_bottleneck_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, name=name + "/body") else: x, x_pre_activ = preres_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/body") resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1( x=x_pre_activ, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/identity_conv") x = nn.add([x, identity], name=name + "/add") return x def preres_init_block(x, in_channels, out_channels, name="preres_init_block"): """ PreResNet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'preres_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv2d( x=x, in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=2, padding=3, use_bias=False, name=name + "/conv") x = batchnorm( x=x, name=name + "/bn") x = nn.Activation("relu", name=name + "/activ")(x) x = nn.MaxPool2D( pool_size=3, strides=2, padding="same", name=name + "/pool")(x) return x def preres_activation(x, name="preres_activation"): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. name : str, default 'preres_activation' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = batchnorm( x=x, name=name + "/bn") x = nn.Activation("relu", name=name + "/activ")(x) return x def preresnet(channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): """ PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = preres_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = preres_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = preres_activation( x=x, name="features/post_activ") x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_preresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = preresnet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def preresnet10(**kwargs): """ PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=10, model_name="preresnet10", **kwargs) def preresnet12(**kwargs): """ PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=12, model_name="preresnet12", **kwargs) def preresnet14(**kwargs): """ PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=14, model_name="preresnet14", **kwargs) def preresnetbc14b(**kwargs): """ PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs) def preresnet16(**kwargs): """ PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=16, model_name="preresnet16", **kwargs) def preresnet18_wd4(**kwargs): """ PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs) def preresnet18_wd2(**kwargs): """ PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs) def preresnet18_w3d4(**kwargs): """ PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs) def preresnet18(**kwargs): """ PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, model_name="preresnet18", **kwargs) def preresnet26(**kwargs): """ PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs) def preresnetbc26b(**kwargs): """ PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs) def preresnet34(**kwargs): """ PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=34, model_name="preresnet34", **kwargs) def preresnetbc38b(**kwargs): """ PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs) def preresnet50(**kwargs): """ PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, model_name="preresnet50", **kwargs) def preresnet50b(**kwargs): """ PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs) def preresnet101(**kwargs): """ PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=101, model_name="preresnet101", **kwargs) def preresnet101b(**kwargs): """ PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs) def preresnet152(**kwargs): """ PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, model_name="preresnet152", **kwargs) def preresnet152b(**kwargs): """ PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs) def preresnet200(**kwargs): """ PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=200, model_name="preresnet200", **kwargs) def preresnet200b(**kwargs): """ PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs) def preresnet269b(**kwargs): """ PreResNet-269 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ preresnet10, preresnet12, preresnet14, preresnetbc14b, preresnet16, preresnet18_wd4, preresnet18_wd2, preresnet18_w3d4, preresnet18, preresnet26, preresnetbc26b, preresnet34, preresnetbc38b, preresnet50, preresnet50b, preresnet101, preresnet101b, preresnet152, preresnet152b, preresnet200, preresnet200b, preresnet269b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet10 or weight_count == 5417128) assert (model != preresnet12 or weight_count == 5491112) assert (model != preresnet14 or weight_count == 5786536) assert (model != preresnetbc14b or weight_count == 10057384) assert (model != preresnet16 or weight_count == 6967208) assert (model != preresnet18_wd4 or weight_count == 3935960) assert (model != preresnet18_wd2 or weight_count == 5802440) assert (model != preresnet18_w3d4 or weight_count == 8473784) assert (model != preresnet18 or weight_count == 11687848) assert (model != preresnet26 or weight_count == 17958568) assert (model != preresnetbc26b or weight_count == 15987624) assert (model != preresnet34 or weight_count == 21796008) assert (model != preresnetbc38b or weight_count == 21917864) assert (model != preresnet50 or weight_count == 25549480) assert (model != preresnet50b or weight_count == 25549480) assert (model != preresnet101 or weight_count == 44541608) assert (model != preresnet101b or weight_count == 44541608) assert (model != preresnet152 or weight_count == 60185256) assert (model != preresnet152b or weight_count == 60185256) assert (model != preresnet200 or weight_count == 64666280) assert (model != preresnet200b or weight_count == 64666280) assert (model != preresnet269b or weight_count == 102065832) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
26,177
31.398515
120
py
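The `layers` tables in `get_preresnet` above follow fixed depth arithmetic, which the builder also asserts: a simple unit holds 2 convolution layers, a bottleneck unit holds 3, plus the initial convolution and the classifier. A quick check over a few of the standard configurations:

configs = {
    18: ([2, 2, 2, 2], False),
    34: ([3, 4, 6, 3], False),
    50: ([3, 4, 6, 3], True),
    101: ([3, 4, 23, 3], True),
    152: ([3, 8, 36, 3], True),
}
for blocks, (layers, bottleneck) in configs.items():
    convs_per_unit = 3 if bottleneck else 2
    assert sum(layers) * convs_per_unit + 2 == blocks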
imgclsmob
imgclsmob-master/keras_/kerascv/models/shufflenetv2b.py
""" ShuffleNet V2 for ImageNet-1K, implemented in Keras. The alternative variant. Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. """ __all__ = ['shufflenetv2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle_lambda, maxpool2d, se_block,\ is_channels_first, get_channel_axis, flatten def shuffle_unit(x, in_channels, out_channels, downsample, use_se, use_residual, name="shuffle_unit"): """ ShuffleNetV2(b) unit. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether do downsample. use_se : bool Whether to use SE block. use_residual : bool Whether to use residual connection. name : str, default 'shuffle_unit' Unit name. Returns: ------- Tensor Resulted tensor. """ mid_channels = out_channels // 2 in_channels2 = in_channels // 2 assert (in_channels % 2 == 0) if downsample: y1 = dwconv3x3_block( x=x, in_channels=in_channels, out_channels=in_channels, strides=2, activation=None, name=name + "/shortcut_dconv") y1 = conv1x1_block( x=y1, in_channels=in_channels, out_channels=in_channels, name=name + "/shortcut_conv") x2 = x else: if is_channels_first(): y1 = nn.Lambda(lambda z: z[:, 0:in_channels2, :, :])(x) x2 = nn.Lambda(lambda z: z[:, in_channels2:, :, :])(x) else: y1 = nn.Lambda(lambda z: z[:, :, :, 0:in_channels2])(x) x2 = nn.Lambda(lambda z: z[:, :, :, in_channels2:])(x) y2_in_channels = (in_channels if downsample else in_channels2) y2_out_channels = out_channels - y2_in_channels y2 = conv1x1_block( x=x2, in_channels=y2_in_channels, out_channels=mid_channels, name=name + "/conv1") y2 = dwconv3x3_block( x=y2, in_channels=mid_channels, out_channels=mid_channels, strides=(2 if downsample else 1), activation=None, name=name + "/dconv") y2 = conv1x1_block( x=y2, in_channels=mid_channels, out_channels=y2_out_channels, name=name + "/conv2") if use_se: y2 = se_block( x=y2, channels=y2_out_channels, name=name + "/se") if use_residual and not downsample: assert (y2_out_channels == in_channels2) y2 = nn.add([y2, x2], name=name + "/add") x = nn.concatenate([y1, y2], axis=get_channel_axis(), name=name + "/concat") assert (out_channels % 2 == 0) x = channel_shuffle_lambda( channels=out_channels, groups=2, name=name + "/c_shuffle")(x) return x def shuffle_init_block(x, in_channels, out_channels, name="shuffle_init_block"): """ ShuffleNetV2(b) specific initial block. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'shuffle_init_block' Block name. Returns: ------- Tensor Resulted tensor. """ x = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=2, name=name + "/conv") x = maxpool2d( x=x, pool_size=3, strides=2, padding=1, ceil_mode=False, name=name + "/pool") return x def shufflenetv2b(channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, in_channels=3, in_size=(224, 224), classes=1000): """ ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- channels : list of list of int Number of output channels for each unit. 
init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. use_se : bool, default False Whether to use SE block. use_residual : bool, default False Whether to use residual connections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = shuffle_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) x = shuffle_unit( x=x, in_channels=in_channels, out_channels=out_channels, downsample=downsample, use_se=use_se, use_residual=use_residual, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_shufflenetv2b(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create ShuffleNetV2(b) model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. """ init_block_channels = 24 final_block_channels = 1024 layers = [4, 8, 4] channels_per_layers = [116, 232, 464] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if width_scale > 1.5: final_block_channels = int(final_block_channels * width_scale) net = shufflenetv2b( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shufflenetv2b_wd2(**kwargs): """ ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. 
""" return get_shufflenetv2b( width_scale=(12.0 / 29.0), model_name="shufflenetv2b_wd2", **kwargs) def shufflenetv2b_w1(**kwargs): """ ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. """ return get_shufflenetv2b( width_scale=1.0, model_name="shufflenetv2b_w1", **kwargs) def shufflenetv2b_w3d2(**kwargs): """ ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. """ return get_shufflenetv2b( width_scale=(44.0 / 29.0), model_name="shufflenetv2b_w3d2", **kwargs) def shufflenetv2b_w2(**kwargs): """ ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. Returns: ------- functor Functor for model graph creation with extra fields. """ return get_shufflenetv2b( width_scale=(61.0 / 29.0), model_name="shufflenetv2b_w2", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ shufflenetv2b_wd2, shufflenetv2b_w1, shufflenetv2b_w3d2, shufflenetv2b_w2, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenetv2b_wd2 or weight_count == 1366792) assert (model != shufflenetv2b_w1 or weight_count == 2279760) assert (model != shufflenetv2b_w3d2 or weight_count == 4410194) assert (model != shufflenetv2b_w2 or weight_count == 7611290) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,952
27.941889
115
py
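The width fractions used by the variants above (12/29, 44/29, 61/29) look arbitrary, but the base stage widths [116, 232, 464] are all multiples of 29, so these scales yield integer, even channel counts. A quick check in exact integer arithmetic (the builder itself computes `int(cij * width_scale)` in floating point):

base = [116, 232, 464]
for num in (12, 29, 44, 61):
    scaled = [c * num // 29 for c in base]
    assert all(c % 2 == 0 for c in scaled)
    print("{:.3f}x -> {}".format(num / 29.0, scaled))
# 0.414x -> [48, 96, 192] ... 2.103x -> [244, 488, 976]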
imgclsmob
imgclsmob-master/keras_/kerascv/models/menet.py
""" MENet for ImageNet-1K, implemented in Keras. Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. """ __all__ = ['menet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4', 'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3'] import os from keras import layers as nn from keras.models import Model from .common import conv2d, conv1x1, conv3x3, depthwise_conv3x3, channel_shuffle_lambda, batchnorm, maxpool2d,\ avgpool2d, is_channels_first, get_channel_axis, flatten def me_unit(x, in_channels, out_channels, side_channels, groups, downsample, ignore_group, name="me_unit"): """ MENet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. side_channels : int Number of side channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. name : str, default 'me_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = out_channels // 4 if downsample: out_channels -= in_channels identity = x # pointwise group convolution 1 x = conv1x1( x=x, in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups), name=name + "/compress_conv1") x = batchnorm( x=x, name=name + "/compress_bn1") x = nn.Activation("relu", name=name + "/compress_activ")(x) x = channel_shuffle_lambda( channels=mid_channels, groups=groups, name=name + "/c_shuffle")(x) # merging y = conv1x1( x=x, in_channels=mid_channels, out_channels=side_channels, name=name + "/s_merge_conv") y = batchnorm( x=y, name=name + "/s_merge_bn") y = nn.Activation("relu", name=name + "/s_merge_activ")(y) # depthwise convolution (bottleneck) x = depthwise_conv3x3( x=x, channels=mid_channels, strides=(2 if downsample else 1), name=name + "/dw_conv2") x = batchnorm( x=x, name=name + "/dw_bn2") # evolution y = conv3x3( x=y, in_channels=side_channels, out_channels=side_channels, strides=(2 if downsample else 1), name=name + "/s_conv") y = batchnorm( x=y, name=name + "/s_conv_bn") y = nn.Activation("relu", name=name + "/s_conv_activ")(y) y = conv1x1( x=y, in_channels=side_channels, out_channels=mid_channels, name=name + "/s_evolve_conv") y = batchnorm( x=y, name=name + "/s_evolve_bn") y = nn.Activation('sigmoid', name=name + "/s_evolve_activ")(y) x = nn.multiply([x, y], name=name + "/mul") # pointwise group convolution 2 x = conv1x1( x=x, in_channels=mid_channels, out_channels=out_channels, groups=groups, name=name + "/expand_conv3") x = batchnorm( x=x, name=name + "/expand_bn3") if downsample: identity = avgpool2d( x=identity, pool_size=3, strides=2, padding=1, name=name + "/avgpool") x = nn.concatenate([x, identity], axis=get_channel_axis(), name=name + "/concat") else: x = nn.add([x, identity], name=name + "/add") x = nn.Activation("relu", name=name + "/final_activ")(x) return x def me_init_block(x, in_channels, out_channels, name="me_init_block"): """ MENet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'me_init_block' Block name. 
    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=2,
        padding=1,
        use_bias=False,
        name=name + "/conv")
    x = batchnorm(
        x=x,
        name=name + "/bn")
    x = nn.Activation("relu", name=name + "/activ")(x)
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        name=name + "/pool")
    return x


def menet(channels,
          init_block_channels,
          side_channels,
          groups,
          in_channels=3,
          in_size=(224, 224),
          classes=1000):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = me_init_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            downsample = (j == 0)
            ignore_group = (i == 0) and (j == 0)
            x = me_unit(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                side_channels=side_channels,
                groups=groups,
                downsample=downsample,
                ignore_group=ignore_group,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)

    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model


def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".keras", "models"),
              **kwargs):
    """
    Create MENet model with specific parameters.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
""" layers = [4, 8, 4] if first_stage_channels == 108: init_block_channels = 12 channels_per_layers = [108, 216, 432] elif first_stage_channels == 128: init_block_channels = 12 channels_per_layers = [128, 256, 512] elif first_stage_channels == 160: init_block_channels = 16 channels_per_layers = [160, 320, 640] elif first_stage_channels == 228: init_block_channels = 24 channels_per_layers = [228, 456, 912] elif first_stage_channels == 256: init_block_channels = 24 channels_per_layers = [256, 512, 1024] elif first_stage_channels == 348: init_block_channels = 24 channels_per_layers = [348, 696, 1392] elif first_stage_channels == 352: init_block_channels = 24 channels_per_layers = [352, 704, 1408] elif first_stage_channels == 456: init_block_channels = 48 channels_per_layers = [456, 912, 1824] else: raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = menet( channels=channels, init_block_channels=init_block_channels, side_channels=side_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def menet108_8x1_g3(**kwargs): """ 108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs) def menet128_8x1_g4(**kwargs): """ 128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs) def menet160_8x1_g8(**kwargs): """ 160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs) def menet228_12x1_g3(**kwargs): """ 228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs) def menet256_12x1_g4(**kwargs): """ 256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs) def menet348_12x1_g3(**kwargs): """ 348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs) def menet352_12x1_g8(**kwargs): """ 352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs) def menet456_24x1_g3(**kwargs): """ 456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ menet108_8x1_g3, menet128_8x1_g4, menet160_8x1_g8, menet228_12x1_g3, menet256_12x1_g4, menet348_12x1_g3, menet352_12x1_g8, menet456_24x1_g3, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != menet108_8x1_g3 or weight_count == 654516) assert (model != menet128_8x1_g4 or weight_count == 750796) assert (model != menet160_8x1_g8 or weight_count == 850120) assert (model != menet228_12x1_g3 or weight_count == 1806568) assert (model != menet256_12x1_g4 or weight_count == 1888240) assert (model != menet348_12x1_g3 or weight_count == 3368128) assert (model != menet352_12x1_g8 or weight_count == 2272872) assert (model != menet456_24x1_g3 or weight_count == 5304784) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
15,495
30.054108
116
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/efficientnet.py
""" EfficientNet for ImageNet-1K, implemented in Keras. Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. """ __all__ = ['efficientnet_model', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b', 'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b'] import os import math from keras import layers as nn from keras.models import Model from .common import round_channels, is_channels_first, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\ se_block def calc_tf_padding(x, kernel_size, strides=1, dilation=1): """ Calculate TF-same like padding size. Parameters: ---------- x : tensor Input tensor. kernel_size : int Convolution window size. strides : int, default 1 Strides of the convolution. dilation : int, default 1 Dilation value for convolution layer. Returns: ------- tuple of 4 int The size of the padding. """ height, width = x._keras_shape[2:] oh = math.ceil(height / strides) ow = math.ceil(width / strides) pad_h = max((oh - 1) * strides + (kernel_size - 1) * dilation + 1 - height, 0) pad_w = max((ow - 1) * strides + (kernel_size - 1) * dilation + 1 - width, 0) return (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2) def effi_dws_conv_unit(x, in_channels, out_channels, strides, bn_epsilon, activation, tf_mode, name="effi_dws_conv_unit"): """ EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution layers. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. bn_epsilon : float Small float added to variance in Batch norm. activation : str Name of activation function. tf_mode : bool Whether to use TF-like mode. name : str, default 'effi_dws_conv_unit' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ residual = (in_channels == out_channels) and (strides == 1) if residual: identity = x if tf_mode: x = nn.ZeroPadding2D( padding=calc_tf_padding(x, kernel_size=3), name=name + "/dw_conv_pad")(x) x = dwconv3x3_block( x=x, in_channels=in_channels, out_channels=in_channels, padding=(0 if tf_mode else 1), bn_epsilon=bn_epsilon, activation=activation, name=name + "/dw_conv") x = se_block( x=x, channels=in_channels, reduction=4, activation=activation, name=name + "/se") x = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, bn_epsilon=bn_epsilon, activation=None, name=name + "/pw_conv") if residual: x = nn.add([x, identity], name=name + "/add") return x def effi_inv_res_unit(x, in_channels, out_channels, kernel_size, strides, expansion_factor, bn_epsilon, activation, tf_mode, name="effi_inv_res_unit"): """ EfficientNet inverted residual unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. expansion_factor : int Factor for expansion of channels. 
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_inv_res_unit'
        Unit name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    residual = (in_channels == out_channels) and (strides == 1)
    mid_channels = in_channels * expansion_factor
    if kernel_size == 3:
        dwconv_block_fn = dwconv3x3_block
    elif kernel_size == 5:
        dwconv_block_fn = dwconv5x5_block
    else:
        raise ValueError("Unsupported kernel size: {}".format(kernel_size))

    if residual:
        identity = x

    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv1")
    if tf_mode:
        x = nn.ZeroPadding2D(
            padding=calc_tf_padding(x, kernel_size=kernel_size, strides=strides),
            name=name + "/conv2_pad")(x)
    x = dwconv_block_fn(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        padding=(0 if tf_mode else (kernel_size // 2)),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv2")
    x = se_block(
        x=x,
        channels=mid_channels,
        reduction=24,
        activation=activation,
        name=name + "/se")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        bn_epsilon=bn_epsilon,
        activation=None,
        name=name + "/conv3")

    if residual:
        x = nn.add([x, identity], name=name + "/add")

    return x


def effi_init_block(x,
                    in_channels,
                    out_channels,
                    bn_epsilon,
                    activation,
                    tf_mode,
                    name="effi_init_block"):
    """
    EfficientNet specific initial block.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_init_block'
        Block name.

    Returns:
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if tf_mode:
        x = nn.ZeroPadding2D(
            padding=calc_tf_padding(x, kernel_size=3, strides=2),
            name=name + "/conv_pad")(x)
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=(0 if tf_mode else 1),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv")
    return x


def efficientnet_model(channels,
                       init_block_channels,
                       final_block_channels,
                       kernel_sizes,
                       strides_per_stage,
                       expansion_factors,
                       dropout_rate=0.2,
                       tf_mode=False,
                       bn_epsilon=1e-5,
                       in_channels=3,
                       in_size=(224, 224),
                       classes=1000):
    """
    EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Kernel size for each unit.
    strides_per_stage : list of int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Expansion factor for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) activation = "swish" x = effi_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, bn_epsilon=bn_epsilon, activation=activation, tf_mode=tf_mode, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): kernel_sizes_per_stage = kernel_sizes[i] expansion_factors_per_stage = expansion_factors[i] for j, out_channels in enumerate(channels_per_stage): kernel_size = kernel_sizes_per_stage[j] expansion_factor = expansion_factors_per_stage[j] strides = strides_per_stage[i] if (j == 0) else 1 if i == 0: x = effi_dws_conv_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bn_epsilon=bn_epsilon, activation=activation, tf_mode=tf_mode, name="features/stage{}/unit{}".format(i + 1, j + 1)) else: x = effi_inv_res_unit( x=x, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, expansion_factor=expansion_factor, bn_epsilon=bn_epsilon, activation=activation, tf_mode=tf_mode, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, bn_epsilon=bn_epsilon, activation=activation, name="features/final_block") in_channels = final_block_channels x = nn.GlobalAveragePooling2D( name="features/final_pool")(x) if dropout_rate > 0.0: x = nn.Dropout( rate=dropout_rate, name="output/dropout")(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output/fc")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_efficientnet(version, in_size, tf_mode=False, bn_epsilon=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create EfficientNet model with specific parameters. Parameters: ---------- version : str Version of EfficientNet ('b0'...'b7'). in_size : tuple of two ints Spatial size of the expected input image. tf_mode : bool, default False Whether to use TF-like mode. bn_epsilon : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" if version == "b0": assert (in_size == (224, 224)) depth_factor = 1.0 width_factor = 1.0 dropout_rate = 0.2 elif version == "b1": assert (in_size == (240, 240)) depth_factor = 1.1 width_factor = 1.0 dropout_rate = 0.2 elif version == "b2": assert (in_size == (260, 260)) depth_factor = 1.2 width_factor = 1.1 dropout_rate = 0.3 elif version == "b3": assert (in_size == (300, 300)) depth_factor = 1.4 width_factor = 1.2 dropout_rate = 0.3 elif version == "b4": assert (in_size == (380, 380)) depth_factor = 1.8 width_factor = 1.4 dropout_rate = 0.4 elif version == "b5": assert (in_size == (456, 456)) depth_factor = 2.2 width_factor = 1.6 dropout_rate = 0.4 elif version == "b6": assert (in_size == (528, 528)) depth_factor = 2.6 width_factor = 1.8 dropout_rate = 0.5 elif version == "b7": assert (in_size == (600, 600)) depth_factor = 3.1 width_factor = 2.0 dropout_rate = 0.5 else: raise ValueError("Unsupported EfficientNet version {}".format(version)) init_block_channels = 32 layers = [1, 2, 2, 3, 3, 4, 1] downsample = [1, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 40, 80, 112, 192, 320] expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6] kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3] strides_per_stage = [1, 2, 2, 2, 1, 2, 1] final_block_channels = 1280 layers = [int(math.ceil(li * depth_factor)) for li in layers] channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), []) kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(kernel_sizes_per_layers, layers, downsample), []) expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(expansion_factors_per_layers, layers, downsample), []) strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(strides_per_stage, layers, downsample), []) strides_per_stage = [si[0] for si in strides_per_stage] init_block_channels = round_channels(init_block_channels * width_factor) if width_factor > 1.0: assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor)) final_block_channels = round_channels(final_block_channels * width_factor) net = efficientnet_model( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernel_sizes=kernel_sizes, strides_per_stage=strides_per_stage, expansion_factors=expansion_factors, dropout_rate=dropout_rate, tf_mode=tf_mode, bn_epsilon=bn_epsilon, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def efficientnet_b0(in_size=(224, 224), **kwargs): """ EfficientNet-B0 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b0", in_size=in_size, model_name="efficientnet_b0", **kwargs) def efficientnet_b1(in_size=(240, 240), **kwargs): """ EfficientNet-B1 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, model_name="efficientnet_b1", **kwargs) def efficientnet_b2(in_size=(260, 260), **kwargs): """ EfficientNet-B2 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, model_name="efficientnet_b2", **kwargs) def efficientnet_b3(in_size=(300, 300), **kwargs): """ EfficientNet-B3 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, model_name="efficientnet_b3", **kwargs) def efficientnet_b4(in_size=(380, 380), **kwargs): """ EfficientNet-B4 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, model_name="efficientnet_b4", **kwargs) def efficientnet_b5(in_size=(456, 456), **kwargs): """ EfficientNet-B5 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, model_name="efficientnet_b5", **kwargs) def efficientnet_b6(in_size=(528, 528), **kwargs): """ EfficientNet-B6 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b6", in_size=in_size, model_name="efficientnet_b6", **kwargs) def efficientnet_b7(in_size=(600, 600), **kwargs): """ EfficientNet-B7 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, model_name="efficientnet_b7", **kwargs) def efficientnet_b0b(in_size=(224, 224), **kwargs): """ EfficientNet-B0-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b0b", **kwargs) def efficientnet_b1b(in_size=(240, 240), **kwargs): """ EfficientNet-B1-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b1b", **kwargs) def efficientnet_b2b(in_size=(260, 260), **kwargs): """ EfficientNet-B2-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b2b", **kwargs) def efficientnet_b3b(in_size=(300, 300), **kwargs): """ EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3b", **kwargs) def efficientnet_b4b(in_size=(380, 380), **kwargs): """ EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b4b", **kwargs) def efficientnet_b5b(in_size=(456, 456), **kwargs): """ EfficientNet-B5-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b5b", **kwargs) def efficientnet_b6b(in_size=(528, 528), **kwargs): """ EfficientNet-B6-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b6b", **kwargs) def efficientnet_b7b(in_size=(600, 600), **kwargs): """ EfficientNet-B7-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b7b", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4, efficientnet_b5, efficientnet_b6, efficientnet_b7, efficientnet_b0b, efficientnet_b1b, efficientnet_b2b, efficientnet_b3b, efficientnet_b4b, efficientnet_b5b, efficientnet_b6b, efficientnet_b7b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != efficientnet_b0 or weight_count == 5288548) assert (model != efficientnet_b1 or weight_count == 7794184) assert (model != efficientnet_b2 or weight_count == 9109994) assert (model != efficientnet_b3 or weight_count == 12233232) assert (model != efficientnet_b4 or weight_count == 19341616) assert (model != efficientnet_b5 or weight_count == 30389784) assert (model != efficientnet_b6 or weight_count == 43040704) assert (model != efficientnet_b7 or weight_count == 66347960) assert (model != efficientnet_b0b or weight_count == 5288548) assert (model != efficientnet_b1b or weight_count == 7794184) assert (model != efficientnet_b2b or weight_count == 9109994) assert (model != efficientnet_b3b or weight_count == 12233232) assert (model != efficientnet_b4b or weight_count == 19341616) assert (model != efficientnet_b5b or weight_count == 30389784) assert (model != efficientnet_b6b or weight_count == 43040704) assert (model != efficientnet_b7b or weight_count == 66347960) if is_channels_first(): x = np.zeros((1, 3, net.in_size[0], net.in_size[1]), np.float32) else: x = np.zeros((1, net.in_size[0], net.in_size[1], 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
29,565
34.366029
120
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/squeezenext.py
""" SqueezeNext for ImageNet-1K, implemented in Keras. Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. """ __all__ = ['squeezenext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2'] import os from keras import layers as nn from keras.models import Model from .common import maxpool2d, conv_block, conv1x1_block, conv7x7_block, is_channels_first, flatten def sqnxt_unit(x, in_channels, out_channels, strides, name="sqnxt_unit"): """ SqueezeNext unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. name : str, default 'sqnxt_unit' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ if strides == 2: reduction_den = 1 resize_identity = True elif in_channels > out_channels: reduction_den = 4 resize_identity = True else: reduction_den = 2 resize_identity = False if resize_identity: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, use_bias=True, name=name + "/identity_conv") else: identity = x x = conv1x1_block( x=x, in_channels=in_channels, out_channels=(in_channels // reduction_den), strides=strides, use_bias=True, name=name + "/conv1") x = conv1x1_block( x=x, in_channels=(in_channels // reduction_den), out_channels=(in_channels // (2 * reduction_den)), use_bias=True, name=name + "/conv2") x = conv_block( x=x, in_channels=(in_channels // (2 * reduction_den)), out_channels=(in_channels // reduction_den), kernel_size=(1, 3), strides=1, padding=(0, 1), use_bias=True, name=name + "/conv3") x = conv_block( x=x, in_channels=(in_channels // reduction_den), out_channels=(in_channels // reduction_den), kernel_size=(3, 1), strides=1, padding=(1, 0), use_bias=True, name=name + "/conv4") x = conv1x1_block( x=x, in_channels=(in_channels // reduction_den), out_channels=out_channels, use_bias=True, name=name + "/conv5") x = nn.add([x, identity], name=name + "/add") x = nn.Activation("relu", name=name + "/final_activ")(x) return x def sqnxt_init_block(x, in_channels, out_channels, name="sqnxt_init_block"): """ ResNet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'sqnxt_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv7x7_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=2, padding=1, use_bias=True, name=name + "/conv") x = maxpool2d( x=x, pool_size=3, strides=2, ceil_mode=True, name=name + "/pool") return x def squeezenext(channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): """ SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. 
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = sqnxt_init_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = sqnxt_unit(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=final_block_channels,
        use_bias=True,
        name="features/final_block")
    in_channels = final_block_channels
    x = nn.AvgPool2D(
        pool_size=7,
        strides=1,
        name="features/final_pool")(x)

    # x = nn.Flatten()(x)
    x = flatten(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output")(x)

    model = Model(inputs=input, outputs=x)
    model.in_size = in_size
    model.classes = classes
    return model


def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".keras", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNext ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]

    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        raise ValueError("Unsupported SqueezeNext version {}".format(version))

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)

    net = squeezenext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net


def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs)


def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
""" return get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs) def sqnxt23_w2(**kwargs): """ 2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs) def sqnxt23v5_w1(**kwargs): """ 1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs) def sqnxt23v5_w3d2(**kwargs): """ 1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs) def sqnxt23v5_w2(**kwargs): """ 2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ sqnxt23_w1, sqnxt23_w3d2, sqnxt23_w2, sqnxt23v5_w1, sqnxt23v5_w3d2, sqnxt23v5_w2, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sqnxt23_w1 or weight_count == 724056) assert (model != sqnxt23_w3d2 or weight_count == 1511824) assert (model != sqnxt23_w2 or weight_count == 2583752) assert (model != sqnxt23v5_w1 or weight_count == 921816) assert (model != sqnxt23v5_w3d2 or weight_count == 1953616) assert (model != sqnxt23v5_w2 or weight_count == 3366344) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
11,973
29.390863
119
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/resnet.py
""" ResNet for ImageNet-1K, implemented in Keras. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['resnet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2', 'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b', 'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'res_block', 'res_bottleneck_block', 'res_unit', 'res_init_block'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, conv7x7_block, maxpool2d, is_channels_first, flatten def res_block(x, in_channels, out_channels, strides, name="res_block"): """ Simple ResNet block for residual path in ResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. name : str, default 'res_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/conv1") x = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, activation=None, name=name + "/conv2") return x def res_bottleneck_block(x, in_channels, out_channels, strides, conv1_stride=False, bottleneck_factor=4, name="res_bottleneck_block"): """ ResNet bottleneck block for residual path in ResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. bottleneck_factor : int, default 4 Bottleneck factor. name : str, default 'res_bottleneck_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = out_channels // bottleneck_factor x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(strides if conv1_stride else 1), name=name + "/conv1") x = conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=(1 if conv1_stride else strides), name=name + "/conv2") x = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, activation=None, name=name + "/conv3") return x def res_unit(x, in_channels, out_channels, strides, bottleneck, conv1_stride, name="res_unit"): """ ResNet unit with residual connection. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. name : str, default 'res_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. 
""" resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = x if bottleneck: x = res_bottleneck_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, name=name + "/body") else: x = res_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/body") x = nn.add([x, identity], name=name + "/add") x = nn.Activation("relu", name=name + "/activ")(x) return x def res_init_block(x, in_channels, out_channels, name="res_init_block"): """ ResNet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'res_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv7x7_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=2, name=name + "/conv") x = maxpool2d( x=x, pool_size=3, strides=2, padding=1, name=name + "/pool") return x def resnet(channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): """ ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = res_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = res_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = resnet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnet10(**kwargs): """ ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=10, model_name="resnet10", **kwargs) def resnet12(**kwargs): """ ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=12, model_name="resnet12", **kwargs) def resnet14(**kwargs): """ ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, model_name="resnet14", **kwargs) def resnetbc14b(**kwargs): """ ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs) def resnet16(**kwargs): """ ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=16, model_name="resnet16", **kwargs) def resnet18_wd4(**kwargs): """ ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs) def resnet18_wd2(**kwargs): """ ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs) def resnet18_w3d4(**kwargs): """ ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs) def resnet18(**kwargs): """ ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="resnet18", **kwargs) def resnet26(**kwargs): """ ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs) def resnetbc26b(**kwargs): """ ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs) def resnet34(**kwargs): """ ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="resnet34", **kwargs) def resnetbc38b(**kwargs): """ ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs) def resnet50(**kwargs): """ ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="resnet50", **kwargs) def resnet50b(**kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs) def resnet101(**kwargs): """ ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="resnet101", **kwargs) def resnet101b(**kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs) def resnet152(**kwargs): """ ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, model_name="resnet152", **kwargs) def resnet152b(**kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs) def resnet200(**kwargs): """ ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, model_name="resnet200", **kwargs) def resnet200b(**kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs) def _test(): import numpy as np import keras keras.backend.set_learning_phase(0) pretrained = False models = [ resnet10, resnet12, resnet14, resnetbc14b, resnet16, resnet18_wd4, resnet18_wd2, resnet18_w3d4, resnet18, resnet26, resnetbc26b, resnet34, resnetbc38b, resnet50, resnet50b, resnet101, resnet101b, resnet152, resnet152b, resnet200, resnet200b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10 or weight_count == 5418792) assert (model != resnet12 or weight_count == 5492776) assert (model != resnet14 or weight_count == 5788200) assert (model != resnetbc14b or weight_count == 10064936) assert (model != resnet16 or weight_count == 6968872) assert (model != resnet18_wd4 or weight_count == 3937400) assert (model != resnet18_wd2 or weight_count == 5804296) assert (model != resnet18_w3d4 or weight_count == 8476056) assert (model != resnet18 or weight_count == 11689512) assert (model != resnet26 or weight_count == 17960232) assert (model != resnetbc26b or weight_count == 15995176) assert (model != resnet34 or weight_count == 21797672) assert (model != resnetbc38b or weight_count == 21925416) assert (model != resnet50 or weight_count == 25557032) assert (model != resnet50b or weight_count == 25557032) assert (model != resnet101 or weight_count == 44549160) assert (model != resnet101b or weight_count == 44549160) assert (model != resnet152 or weight_count == 60192808) assert (model != resnet152b or weight_count == 60192808) assert (model != resnet200 or weight_count == 64673832) assert (model != resnet200b or weight_count == 64673832) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
24,153
30.698163
118
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/mobilenetv2.py
""" MobileNetV2 for ImageNet-1K, implemented in Keras. Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. """ __all__ = ['mobilenetv2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten def linear_bottleneck(x, in_channels, out_channels, strides, expansion, name="linear_bottleneck"): """ So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. expansion : bool Whether do expansion of channels. name : str, default 'linear_bottleneck' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ residual = (in_channels == out_channels) and (strides == 1) mid_channels = in_channels * 6 if expansion else in_channels if residual: identity = x x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, activation="relu6", name=name + "/conv1") x = dwconv3x3_block( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation="relu6", name=name + "/conv2") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, activation=None, name=name + "/conv3") if residual: x = nn.add([x, identity], name=name + "/add") return x def mobilenetv2(channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), classes=1000): """ MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = conv3x3_block( x=input, in_channels=in_channels, out_channels=init_block_channels, strides=2, activation="relu6", name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) x = linear_bottleneck( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, expansion=expansion, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = conv1x1_block( x=x, in_channels=in_channels, out_channels=final_block_channels, activation="relu6", name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) x = conv1x1( x=x, in_channels=in_channels, out_channels=classes, use_bias=False, name="output") # x = nn.Flatten()(x) x = flatten(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_mobilenetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create MobileNetV2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ init_block_channels = 32 final_block_channels = 1280 layers = [1, 2, 3, 4, 3, 3, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = int(final_block_channels * width_scale) net = mobilenetv2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mobilenetv2_w1(**kwargs): """ 1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=1.0, model_name="mobilenetv2_w1", **kwargs) def mobilenetv2_w3d4(**kwargs): """ 0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_mobilenetv2(width_scale=0.75, model_name="mobilenetv2_w3d4", **kwargs) def mobilenetv2_wd2(**kwargs): """ 0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.5, model_name="mobilenetv2_wd2", **kwargs) def mobilenetv2_wd4(**kwargs): """ 0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.25, model_name="mobilenetv2_wd4", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ mobilenetv2_w1, mobilenetv2_w3d4, mobilenetv2_wd2, mobilenetv2_wd4, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv2_w1 or weight_count == 3504960) assert (model != mobilenetv2_w3d4 or weight_count == 2627592) assert (model != mobilenetv2_wd2 or weight_count == 1964736) assert (model != mobilenetv2_wd4 or weight_count == 1516392) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
9,328
30.305369
118
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/squeezenet.py
""" SqueezeNet for ImageNet-1K, implemented in Keras. Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. """ __all__ = ['squeezenet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1'] import os from keras import layers as nn from keras.models import Model from .common import maxpool2d, conv2d, is_channels_first, get_channel_axis, flatten def fire_conv(x, in_channels, out_channels, kernel_size, padding, name="fire_conv"): """ SqueezeNet specific convolution block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int or tuple/list of 2 int Padding value for convolution layer. name : str, default 'fire_conv' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv2d( x=x, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, use_bias=True, name=name + "/conv") x = nn.Activation("relu", name=name + "/activ")(x) return x def fire_unit(x, in_channels, squeeze_channels, expand1x1_channels, expand3x3_channels, residual, name="fire_unit"): """ SqueezeNet unit, so-called 'Fire' unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. squeeze_channels : int Number of output channels for squeeze convolution blocks. expand1x1_channels : int Number of output channels for expand 1x1 convolution blocks. expand3x3_channels : int Number of output channels for expand 3x3 convolution blocks. residual : bool Whether use residual connection. name : str, default 'fire_unit' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ if residual: identity = x x = fire_conv( x=x, in_channels=in_channels, out_channels=squeeze_channels, kernel_size=1, padding=0, name=name + "/squeeze") y1 = fire_conv( x=x, in_channels=squeeze_channels, out_channels=expand1x1_channels, kernel_size=1, padding=0, name=name + "/expand1x1") y2 = fire_conv( x=x, in_channels=squeeze_channels, out_channels=expand3x3_channels, kernel_size=3, padding=1, name=name + "/expand3x3") out = nn.concatenate([y1, y2], axis=get_channel_axis(), name=name + "/concat") if residual: out = nn.add([out, identity], name=name + "/add") return out def squeeze_init_block(x, in_channels, out_channels, kernel_size, name="squeeze_init_block"): """ ResNet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. name : str, default 'squeeze_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. 
""" x = conv2d( x=x, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=2, use_bias=True, name=name + "/conv") x = nn.Activation("relu", name=name + "/activ")(x) return x def squeezenet(channels, residuals, init_block_kernel_size, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000): """ SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- channels : list of list of int Number of output channels for each unit. residuals : bool Whether to use residual units. init_block_kernel_size : int or tuple/list of 2 int The dimensions of the convolution window for the initial unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = squeeze_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, kernel_size=init_block_kernel_size, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): x = maxpool2d( x=x, pool_size=3, strides=2, ceil_mode=True, name="features/pool{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): expand_channels = out_channels // 2 squeeze_channels = out_channels // 8 x = fire_unit( x=x, in_channels=in_channels, squeeze_channels=squeeze_channels, expand1x1_channels=expand_channels, expand3x3_channels=expand_channels, residual=((residuals is not None) and (residuals[i][j] == 1)), name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.Dropout( rate=0.5, name="features/dropout")(x) x = nn.Conv2D( filters=classes, kernel_size=1, name="output/final_conv")(x) x = nn.Activation("relu", name="output/final_activ")(x) x = nn.AvgPool2D( pool_size=13, strides=1, name="output/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_squeezenet(version, residual=False, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create SqueezeNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('1.0' or '1.1'). residual : bool, default False Whether to use residual connections. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" if version == '1.0': channels = [[128, 128, 256], [256, 384, 384, 512], [512]] residuals = [[0, 1, 0], [1, 0, 1, 0], [1]] init_block_kernel_size = 7 init_block_channels = 96 elif version == '1.1': channels = [[128, 128], [256, 256], [384, 384, 512, 512]] residuals = [[0, 1], [0, 1], [0, 1, 0, 1]] init_block_kernel_size = 3 init_block_channels = 64 else: raise ValueError("Unsupported SqueezeNet version {}".format(version)) if not residual: residuals = None net = squeezenet( channels=channels, residuals=residuals, init_block_kernel_size=init_block_kernel_size, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def squeezenet_v1_0(**kwargs): """ SqueezeNet 'vanilla' model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=False, model_name="squeezenet_v1_0", **kwargs) def squeezenet_v1_1(**kwargs): """ SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.1", residual=False, model_name="squeezenet_v1_1", **kwargs) def squeezeresnet_v1_0(**kwargs): """ SqueezeNet model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=True, model_name="squeezeresnet_v1_0", **kwargs) def squeezeresnet_v1_1(**kwargs): """ SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_squeezenet(version="1.1", residual=True, model_name="squeezeresnet_v1_1", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ squeezenet_v1_0, squeezenet_v1_1, squeezeresnet_v1_0, squeezeresnet_v1_1, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != squeezenet_v1_0 or weight_count == 1248424) assert (model != squeezenet_v1_1 or weight_count == 1235496) assert (model != squeezeresnet_v1_0 or weight_count == 1248424) assert (model != squeezeresnet_v1_1 or weight_count == 1235496) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
12,000
29.693095
118
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/vgg.py
""" VGG for ImageNet-1K, implemented in Keras. Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. """ __all__ = ['vgg', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b', 'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b'] import os from keras import layers as nn from keras.models import Model from .common import conv3x3_block, is_channels_first, flatten def vgg_dense(x, in_channels, out_channels, name="vgg_dense"): """ VGG specific dense block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'vgg_dense' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = nn.Dense( units=out_channels, input_dim=in_channels, name=name + "/fc")(x) x = nn.Activation("relu", name=name + "/activ")(x) x = nn.Dropout( rate=0.5, name=name + "/dropout")(x) return x def vgg_output_block(x, in_channels, classes, name="vgg_output_block"): """ VGG specific output block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. classes : int Number of classification classes. name : str, default 'vgg_output_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = 4096 x = vgg_dense( x=x, in_channels=in_channels, out_channels=mid_channels, name=name + "/fc1") x = vgg_dense( x=x, in_channels=mid_channels, out_channels=mid_channels, name=name + "/fc2") x = nn.Dense( units=classes, input_dim=mid_channels, name=name + "/fc3")(x) return x def vgg(channels, use_bias=True, use_bn=False, in_channels=3, in_size=(224, 224), classes=1000): """ VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- channels : list of list of int Number of output channels for each unit. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = input for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): x = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.MaxPool2D( pool_size=2, strides=2, padding="valid", name="features/stage{}/pool".format(i + 1))(x) x = flatten(x, reshape=True) x = vgg_output_block( x=x, in_channels=(in_channels * 7 * 7), classes=classes, name="output") model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_vgg(blocks, use_bias=True, use_bn=False, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create VGG model with specific parameters. Parameters: ---------- blocks : int Number of blocks. 
use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if blocks == 11: layers = [1, 1, 2, 2, 2] elif blocks == 13: layers = [2, 2, 2, 2, 2] elif blocks == 16: layers = [2, 2, 3, 3, 3] elif blocks == 19: layers = [2, 2, 4, 4, 4] else: raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks)) channels_per_layers = [64, 128, 256, 512, 512] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = vgg( channels=channels, use_bias=use_bias, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def vgg11(**kwargs): """ VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, model_name="vgg11", **kwargs) def vgg13(**kwargs): """ VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, model_name="vgg13", **kwargs) def vgg16(**kwargs): """ VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, model_name="vgg16", **kwargs) def vgg19(**kwargs): """ VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, model_name="vgg19", **kwargs) def bn_vgg11(**kwargs): """ VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs) def bn_vgg13(**kwargs): """ VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs) def bn_vgg16(**kwargs): """ VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs) def bn_vgg19(**kwargs): """ VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs) def bn_vgg11b(**kwargs): """ VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs) def bn_vgg13b(**kwargs): """ VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs) def bn_vgg16b(**kwargs): """ VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs) def bn_vgg19b(**kwargs): """ VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ vgg11, vgg13, vgg16, vgg19, bn_vgg11, bn_vgg13, bn_vgg16, bn_vgg19, bn_vgg11b, bn_vgg13b, bn_vgg16b, bn_vgg19b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != vgg11 or weight_count == 132863336) assert (model != vgg13 or weight_count == 133047848) assert (model != vgg16 or weight_count == 138357544) assert (model != vgg19 or weight_count == 143667240) assert (model != bn_vgg11 or weight_count == 132866088) assert (model != bn_vgg13 or weight_count == 133050792) assert (model != bn_vgg16 or weight_count == 138361768) assert (model != bn_vgg19 or weight_count == 143672744) assert (model != bn_vgg11b or weight_count == 132868840) assert (model != bn_vgg13b or weight_count == 133053736) assert (model != bn_vgg16b or weight_count == 138365992) assert (model != bn_vgg19b or weight_count == 143678248) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,419
29.639269
117
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/mnasnet.py
""" MnasNet for ImageNet-1K, implemented in Keras. Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. """ __all__ = ['mnasnet_model', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small'] import os from keras import layers as nn from keras.models import Model from .common import is_channels_first, flatten, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\ se_block, round_channels def dws_exp_se_res_unit(x, in_channels, out_channels, strides=1, use_kernel3=True, exp_factor=1, se_factor=0, use_skip=True, activation="relu", name="dws_exp_se_res_unit"): """ Depthwise separable expanded residual unit with SE-block. Here it used as MnasNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the second convolution layer. use_kernel3 : bool, default True Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int, default 1 Expansion factor for each unit. se_factor : int, default 0 SE reduction factor for each unit. use_skip : bool, default True Whether to use skip connection. activation : str, default 'relu' Activation function or name of activation function. name : str, default 'dws_exp_se_res_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ assert (exp_factor >= 1) residual = (in_channels == out_channels) and (strides == 1) and use_skip use_exp_conv = exp_factor > 1 use_se = se_factor > 0 mid_channels = exp_factor * in_channels dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block if residual: identity = x if use_exp_conv: x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, activation=activation, name=name + "/exp_conv") x = dwconv_block_fn( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, name=name + "/dw_conv") if use_se: x = se_block( x=x, channels=mid_channels, reduction=(exp_factor * se_factor), approx_sigmoid=False, round_mid=False, activation=activation, name=name + "/se") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, activation=None, name=name + "/pw_conv") if residual: x = nn.add([x, identity], name=name + "/add") return x def mnas_init_block(x, in_channels, out_channels, mid_channels, use_skip, name="mnas_init_block"): """ MnasNet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. name : str, default 'mnas_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=2, name=name + "/conv1") x = dws_exp_se_res_unit( x=x, in_channels=mid_channels, out_channels=out_channels, use_skip=use_skip, name=name + "/conv2") return x def mnas_final_block(x, in_channels, out_channels, mid_channels, use_skip, name="mnas_final_block"): """ MnasNet specific final block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. 
mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. name : str, default 'mnas_final_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = dws_exp_se_res_unit( x=x, in_channels=in_channels, out_channels=mid_channels, exp_factor=6, use_skip=use_skip, name=name + "/conv1") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, name=name + "/conv2") return x def mnasnet_model(channels, init_block_channels, final_block_channels, kernels3, exp_factors, se_factors, init_block_use_skip, final_block_use_skip, in_channels=3, in_size=(224, 224), classes=1000): """ MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : list of 2 int Number of output channels for the initial unit. final_block_channels : list of 2 int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. se_factors : list of list of int SE reduction factor for each unit. init_block_use_skip : bool Whether to use skip connection in the initial unit. final_block_use_skip : bool Whether to use skip connection in the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = mnas_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels[1], mid_channels=init_block_channels[0], use_skip=init_block_use_skip, name="features/init_block") in_channels = init_block_channels[1] for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] se_factor = se_factors[i][j] x = dws_exp_se_res_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, use_kernel3=use_kernel3, exp_factor=exp_factor, se_factor=se_factor, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = mnas_final_block( x=x, in_channels=in_channels, out_channels=final_block_channels[1], mid_channels=final_block_channels[0], use_skip=final_block_use_skip, name="features/final_block") in_channels = final_block_channels[1] x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_mnasnet(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create MnasNet model with specific parameters. Parameters: ---------- version : str Version of MnasNet ('b1', 'a1' or 'small'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model.
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if version == "b1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]] kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]] exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]] se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]] init_block_use_skip = False final_block_use_skip = False elif version == "a1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]] init_block_use_skip = False final_block_use_skip = True elif version == "small": init_block_channels = [8, 8] final_block_channels = [144, 1280] channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]] exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]] init_block_use_skip = True final_block_use_skip = True else: raise ValueError("Unsupported MnasNet version {}".format(version)) if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = [round_channels(ci * width_scale) for ci in init_block_channels] net = mnasnet_model( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, se_factors=se_factors, init_block_use_skip=init_block_use_skip, final_block_use_skip=final_block_use_skip, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mnasnet_b1(**kwargs): """ MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs) def mnasnet_a1(**kwargs): """ MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs) def mnasnet_small(**kwargs): """ MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters.
""" return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ mnasnet_b1, mnasnet_a1, mnasnet_small, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mnasnet_b1 or weight_count == 4383312) assert (model != mnasnet_a1 or weight_count == 3887038) assert (model != mnasnet_small or weight_count == 2030264) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
14,240
31.439636
118
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/seresnet.py
""" SE-ResNet for ImageNet-1K, implemented in Keras. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['seresnet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26', 'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b', 'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, se_block, is_channels_first, flatten from .resnet import res_block, res_bottleneck_block, res_init_block def seres_unit(x, in_channels, out_channels, strides, bottleneck, conv1_stride, name="seres_unit"): """ SE-ResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. name : str, default 'seres_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = x if bottleneck: x = res_bottleneck_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, name=name + "/body") else: x = res_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/body") x = se_block( x=x, channels=out_channels, name=name + "/se") x = nn.add([x, identity], name=name + "/add") x = nn.Activation("relu", name=name + "/activ")(x) return x def seresnet(channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): """ SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = res_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = seres_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_seresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create SE-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = seresnet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnet10(**kwargs): """ SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=10, model_name="seresnet10", **kwargs) def seresnet12(**kwargs): """ SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=12, model_name="seresnet12", **kwargs) def seresnet14(**kwargs): """ SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=14, model_name="seresnet14", **kwargs) def seresnet16(**kwargs): """ SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=16, model_name="seresnet16", **kwargs) def seresnet18(**kwargs): """ SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=18, model_name="seresnet18", **kwargs) def seresnet26(**kwargs): """ SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs) def seresnetbc26b(**kwargs): """ SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs) def seresnet34(**kwargs): """ SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=34, model_name="seresnet34", **kwargs) def seresnetbc38b(**kwargs): """ SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs) def seresnet50(**kwargs): """ SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=50, model_name="seresnet50", **kwargs) def seresnet50b(**kwargs): """ SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs) def seresnet101(**kwargs): """ SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=101, model_name="seresnet101", **kwargs) def seresnet101b(**kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs) def seresnet152(**kwargs): """ SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, model_name="seresnet152", **kwargs) def seresnet152b(**kwargs): """ SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs) def seresnet200(**kwargs): """ SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnet(blocks=200, model_name="seresnet200", **kwargs) def seresnet200b(**kwargs): """ SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ seresnet10, seresnet12, seresnet14, seresnet16, seresnet18, seresnet26, seresnetbc26b, seresnet34, seresnetbc38b, seresnet50, seresnet50b, seresnet101, seresnet101b, seresnet152, seresnet152b, seresnet200, seresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet10 or weight_count == 5463332) assert (model != seresnet12 or weight_count == 5537896) assert (model != seresnet14 or weight_count == 5835504) assert (model != seresnet16 or weight_count == 7024640) assert (model != seresnet18 or weight_count == 11778592) assert (model != seresnet26 or weight_count == 18093852) assert (model != seresnetbc26b or weight_count == 17395976) assert (model != seresnet34 or weight_count == 21958868) assert (model != seresnetbc38b or weight_count == 24026616) assert (model != seresnet50 or weight_count == 28088024) assert (model != seresnet50b or weight_count == 28088024) assert (model != seresnet101 or weight_count == 49326872) assert (model != seresnet101b or weight_count == 49326872) assert (model != seresnet152 or weight_count == 66821848) assert (model != seresnet152b or weight_count == 66821848) assert (model != seresnet200 or weight_count == 71835864) assert (model != seresnet200b or weight_count == 71835864) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
17,838
31.02693
118
py
imgclsmob
imgclsmob-master/keras_/kerascv/models/densenet.py
""" DenseNet for ImageNet-1K, implemented in Keras. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['densenet', 'densenet121', 'densenet161', 'densenet169', 'densenet201'] import os from keras import layers as nn from keras.models import Model from .common import pre_conv1x1_block, pre_conv3x3_block, is_channels_first, get_channel_axis, flatten from .preresnet import preres_init_block, preres_activation def dense_unit(x, in_channels, out_channels, dropout_rate, name="dense_unit"): """ DenseNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. name : str, default 'dense_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor. """ bn_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bn_size identity = x x = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, name=name + "/conv1") x = pre_conv3x3_block( x=x, in_channels=mid_channels, out_channels=inc_channels, name=name + "/conv2") use_dropout = (dropout_rate != 0.0) if use_dropout: x = nn.Dropout( rate=dropout_rate, name=name + "dropout")(x) x = nn.concatenate([identity, x], axis=get_channel_axis(), name=name + "/concat") return x def transition_block(x, in_channels, out_channels, name="transition_block"): """ DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the first unit of each stage. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'transition_block' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor. """ x = pre_conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, name=name + "/conv") x = nn.AvgPool2D( pool_size=2, strides=2, padding="valid", name=name + "/pool")(x) return x def densenet(channels, init_block_channels, dropout_rate=0.0, in_channels=3, in_size=(224, 224), classes=1000): """ DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = preres_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): if i != 0: x = transition_block( x=x, in_channels=in_channels, out_channels=(in_channels // 2), name="features/stage{}/trans{}".format(i + 1, i + 1)) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): x = dense_unit( x=x, in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = preres_activation( x=x, name="features/post_activ") x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_densenet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create DenseNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if blocks == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif blocks == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif blocks == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif blocks == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks)) from functools import reduce channels = reduce(lambda xi, yi: xi + [reduce(lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = densenet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def densenet121(**kwargs): """ DenseNet-121 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_densenet(blocks=121, model_name="densenet121", **kwargs) def densenet161(**kwargs): """ DenseNet-161 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_densenet(blocks=161, model_name="densenet161", **kwargs) def densenet169(**kwargs): """ DenseNet-169 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_densenet(blocks=169, model_name="densenet169", **kwargs) def densenet201(**kwargs): """ DenseNet-201 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_densenet(blocks=201, model_name="densenet201", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ densenet121, densenet161, densenet169, densenet201, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet121 or weight_count == 7978856) assert (model != densenet161 or weight_count == 28681000) assert (model != densenet169 or weight_count == 14149480) assert (model != densenet201 or weight_count == 20013928) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
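The nested `reduce` in `get_densenet` is the densest expression in the file: the inner fold grows a stage's running width by `growth_rate` per unit, seeded with half of the previous stage's final width (the transition block halves the channels), and the outer fold chains the stages together. A standalone sketch evaluating it for the DenseNet-121 configuration (values copied from `get_densenet`):

# Standalone evaluation of the channel schedule built in `get_densenet`.
from functools import reduce

init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]

channels = reduce(
    lambda xi, yi: xi + [reduce(
        lambda xj, yj: xj + [xj[-1] + yj],
        [growth_rate] * yi,
        [xi[-1][-1] // 2])[1:]],
    layers,
    [[init_block_channels * 2]])[1:]

# Each stage ends at the classical DenseNet-121 widths.
assert [stage[-1] for stage in channels] == [256, 512, 1024, 1024]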
9,837
28.722054
116
py
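Note on the channel schedule in the densenet file above: the nested functools.reduce one-liner in get_densenet is compact but hard to read. Below is a minimal equivalent sketch, assuming only what the file itself shows (a transition block that halves the width between stages, and dense units that each add growth_rate channels); the function name is illustrative and not part of the repo.

def densenet_channels_sketch(init_block_channels, growth_rate, layers):
    # Each stage starts at half of the previous stage's final width
    # (the transition block halves channels), then every dense unit
    # appends its growth_rate channels.
    channels = []
    prev_width = init_block_channels * 2
    for num_units in layers:
        width = prev_width // 2
        stage = []
        for _ in range(num_units):
            width += growth_rate
            stage.append(width)
        channels.append(stage)
        prev_width = stage[-1]
    return channels

# DenseNet-121 (64, 32, [6, 12, 24, 16]): stages end at 256, 512, 1024, 1024.
assert densenet_channels_sketch(64, 32, [6, 12, 24, 16])[-1][-1] == 1024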
imgclsmob
imgclsmob-master/keras_/kerascv/models/seresnext.py
""" SE-ResNeXt for ImageNet-1K, implemented in Keras. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['seresnext', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1_block, se_block, is_channels_first, flatten from .resnet import res_init_block from .resnext import resnext_bottleneck def seresnext_unit(x, in_channels, out_channels, strides, cardinality, bottleneck_width, name="seresnext_unit"): """ SE-ResNeXt unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. name : str, default 'seresnext_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = x x = resnext_bottleneck( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, name=name + "/body") x = se_block( x=x, channels=out_channels, name=name + "/se") x = nn.add([x, identity], name=name + "/add") activ = nn.Activation("relu", name=name + "/activ") x = activ(x) return x def seresnext(channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): """ SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = res_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = seresnext_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_seresnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create SE-ResNeXt model with specific parameters. 
Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = seresnext( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnext50_32x4d(**kwargs): """ SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d", **kwargs) def seresnext101_32x4d(**kwargs): """ SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d", **kwargs) def seresnext101_64x4d(**kwargs): """ SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_seresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ seresnext50_32x4d, seresnext101_32x4d, seresnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnext50_32x4d or weight_count == 27559896) assert (model != seresnext101_32x4d or weight_count == 48955416) assert (model != seresnext101_64x4d or weight_count == 88232984) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
8,382
29.046595
115
py
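Note: se_block used by seresnext_unit above is imported from .common, whose body is not shown in this dump. For orientation, here is a minimal Keras sketch of a squeeze-and-excitation gate in the spirit of the cited paper; it assumes channels-last layout and a reduction ratio of 16, and is not the repo's actual implementation.

from keras import layers as nn

def se_gate_sketch(x, channels, reduction=16, name="se"):
    # Squeeze: global average pooling collapses H x W into one value per channel.
    w = nn.GlobalAveragePooling2D(name=name + "/pool")(x)
    # Excitation: a bottleneck MLP ending in a per-channel sigmoid gate.
    w = nn.Dense(units=channels // reduction, activation="relu", name=name + "/fc1")(w)
    w = nn.Dense(units=channels, activation="sigmoid", name=name + "/fc2")(w)
    # Broadcast the gate over the spatial dimensions and rescale the input.
    w = nn.Reshape((1, 1, channels), name=name + "/reshape")(w)
    return nn.multiply([x, w], name=name + "/scale")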
imgclsmob
imgclsmob-master/keras_/kerascv/models/mobilenetv3.py
""" MobileNetV3 for ImageNet-1K, implemented in Keras. Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ __all__ = ['mobilenetv3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4', 'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2', 'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4'] import os from keras import layers as nn from keras.models import Model from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\ se_block, HSwish, is_channels_first, flatten def mobilenetv3_unit(x, in_channels, out_channels, exp_channels, strides, use_kernel3, activation, use_se, name="mobilenetv3_unit"): """ MobileNetV3 unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. activation : str Activation function or name of activation function. use_se : bool Whether to use SE-module. name : str, default 'mobilenetv3_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ assert (exp_channels >= out_channels) residual = (in_channels == out_channels) and (strides == 1) use_exp_conv = exp_channels != out_channels mid_channels = exp_channels if residual: identity = x if use_exp_conv: x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, activation=activation, name=name + "/exp_conv") if use_kernel3: x = dwconv3x3_block( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, name=name + "/conv1") else: x = dwconv5x5_block( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, name=name + "/conv1") if use_se: x = se_block( x=x, channels=mid_channels, reduction=4, approx_sigmoid=True, round_mid=True, name=name + "/se") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, activation=None, name=name + "/conv2") if residual: x = nn.add([x, identity], name=name + "/add") return x def mobilenetv3_final_block(x, in_channels, out_channels, use_se, name="mobilenetv3_final_block"): """ MobileNetV3 final block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. use_se : bool Whether to use SE-module. name : str, default 'mobilenetv3_final_block' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, activation="hswish", name=name + "/conv") if use_se: x = se_block( x=x, channels=out_channels, reduction=4, approx_sigmoid=True, round_mid=True, name=name + "/se") return x def mobilenetv3_classifier(x, in_channels, out_channels, mid_channels, dropout_rate, name="mobilenetv3_final_block"): """ MobileNetV3 classifier. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. dropout_rate : float Parameter of Dropout layer. 
Fraction of the input units to drop. name : str, default 'mobilenetv3_classifier' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ x = conv1x1( x=x, in_channels=in_channels, out_channels=mid_channels, name=name + "/conv1") x = HSwish(name=name + "/hswish")(x) use_dropout = (dropout_rate != 0.0) if use_dropout: x = nn.Dropout( rate=dropout_rate, name=name + "/dropout")(x) x = conv1x1( x=x, in_channels=mid_channels, out_channels=out_channels, use_bias=True, name=name + "/conv2") return x def mobilenetv3(channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), classes=1000): """ MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- channels : list of list of int Number of output channels for each unit. exp_channels : list of list of int Number of middle (expanded) channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. use_relu : list of list of int/bool Using ReLU activation flag for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. final_use_se : bool Whether to use SE-module in the final block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = conv3x3_block( x=input, in_channels=in_channels, out_channels=init_block_channels, strides=2, activation="hswish", name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): exp_channels_ij = exp_channels[i][j] strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 activation = "relu" if use_relu[i][j] == 1 else "hswish" use_se_flag = use_se[i][j] == 1 x = mobilenetv3_unit( x=x, in_channels=in_channels, out_channels=out_channels, exp_channels=exp_channels_ij, use_kernel3=use_kernel3, strides=strides, activation=activation, use_se=use_se_flag, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = mobilenetv3_final_block( x=x, in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se, name="features/final_block") in_channels = final_block_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) x = mobilenetv3_classifier( x=x, in_channels=in_channels, out_channels=classes, mid_channels=classifier_mid_channels, dropout_rate=0.2, name="output") x = flatten(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_mobilenetv3(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create MobileNetV3 model with specific parameters. 
Parameters: ---------- version : str Version of MobileNetV3 ('small' or 'large'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if version == "small": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]] exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]] first_stride = True final_block_channels = 576 elif version == "large": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]] kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]] first_stride = False final_block_channels = 960 else: raise ValueError("Unsupported MobileNetV3 version {}".format(version)) final_use_se = False classifier_mid_channels = 1280 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels] init_block_channels = round_channels(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale) net = mobilenetv3( channels=channels, exp_channels=exp_channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, use_relu=use_relu, use_se=use_se, first_stride=first_stride, final_use_se=final_use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mobilenetv3_small_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_small_wd2(**kwargs): """ MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs) def mobilenetv3_small_w3d4(**kwargs): """ MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs) def mobilenetv3_small_w1(**kwargs): """ MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs) def mobilenetv3_small_w5d4(**kwargs): """ MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs) def mobilenetv3_large_w7d20(**kwargs): """ MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs) def mobilenetv3_large_wd2(**kwargs): """ MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs) def mobilenetv3_large_w3d4(**kwargs): """ MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs) def mobilenetv3_large_w1(**kwargs): """ MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs) def mobilenetv3_large_w5d4(**kwargs): """ MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ mobilenetv3_small_w7d20, mobilenetv3_small_wd2, mobilenetv3_small_w3d4, mobilenetv3_small_w1, mobilenetv3_small_w5d4, mobilenetv3_large_w7d20, mobilenetv3_large_wd2, mobilenetv3_large_w3d4, mobilenetv3_large_w1, mobilenetv3_large_w5d4, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv3_small_w7d20 or weight_count == 2159600) assert (model != mobilenetv3_small_wd2 or weight_count == 2288976) assert (model != mobilenetv3_small_w3d4 or weight_count == 2581312) assert (model != mobilenetv3_small_w1 or weight_count == 2945288) assert (model != mobilenetv3_small_w5d4 or weight_count == 3643632) assert (model != mobilenetv3_large_w7d20 or weight_count == 2943080) assert (model != mobilenetv3_large_wd2 or weight_count == 3334896) assert (model != mobilenetv3_large_w3d4 or weight_count == 4263496) assert (model != mobilenetv3_large_w1 or weight_count == 5481752) assert (model != mobilenetv3_large_w5d4 or weight_count == 7459144) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
18,859
32.204225
115
py
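Note: the behavior of every MobileNetV3 unit above is determined by the parallel configuration lists built in get_mobilenetv3. The short sketch below prints the resulting per-unit plan for the 'small' variant, reusing the stride/kernel/activation selection rules verbatim from the builder loop (the script itself is illustrative and not part of the repo).

channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
first_stride = True  # the 'small' variant already strides in the first stage

for i, stage in enumerate(channels):
    for j, out_channels in enumerate(stage):
        # Same selection rules as the loop in mobilenetv3():
        strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
        kernel = 3 if kernels3[i][j] == 1 else 5
        activation = "relu" if use_relu[i][j] == 1 else "hswish"
        print("stage{}/unit{}: out={}, exp={}, {}x{}, stride={}, {}".format(
            i + 1, j + 1, out_channels, exp_channels[i][j],
            kernel, kernel, strides, activation))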
imgclsmob
imgclsmob-master/keras_/kerascv/models/sepreresnet.py
""" SE-PreResNet for ImageNet-1K, implemented in Keras. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['sepreresnet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18', 'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b'] import os from keras import layers as nn from keras.models import Model from .common import conv1x1, se_block, is_channels_first, flatten from .preresnet import preres_block, preres_bottleneck_block, preres_init_block, preres_activation def sepreres_unit(x, in_channels, out_channels, strides, bottleneck, conv1_stride, name="sepreres_unit"): """ SE-PreResNet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. name : str, default 'sepreres_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor. """ identity = x if bottleneck: x, x_pre_activ = preres_bottleneck_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, name=name + "/body") else: x, x_pre_activ = preres_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/body") x = se_block( x=x, channels=out_channels, name=name + "/se") resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1( x=x_pre_activ, in_channels=in_channels, out_channels=out_channels, strides=strides, name=name + "/identity_conv") x = nn.add([x, identity], name=name + "/add") return x def sepreresnet(channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000): """ SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = preres_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = sepreres_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = preres_activation( x=x, name="features/post_activ") x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_sepreresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create PreResNet or SE-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = sepreresnet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sepreresnet10(**kwargs): """ SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs) def sepreresnet12(**kwargs): """ SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs) def sepreresnet14(**kwargs): """ SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs) def sepreresnet16(**kwargs): """ SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs) def sepreresnet18(**kwargs): """ SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs) def sepreresnet26(**kwargs): """ SE-PreResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, model_name="sepreresnet26", **kwargs) def sepreresnetbc26b(**kwargs): """ SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs) def sepreresnet34(**kwargs): """ SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs) def sepreresnetbc38b(**kwargs): """ SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs) def sepreresnet50(**kwargs): """ SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs) def sepreresnet50b(**kwargs): """ SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs) def sepreresnet101(**kwargs): """ SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs) def sepreresnet101b(**kwargs): """ SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs) def sepreresnet152(**kwargs): """ SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs) def sepreresnet152b(**kwargs): """ SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs) def sepreresnet200(**kwargs): """ SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs) def sepreresnet200b(**kwargs): """ SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ sepreresnet10, sepreresnet12, sepreresnet14, sepreresnet16, sepreresnet18, sepreresnet26, sepreresnetbc26b, sepreresnet34, sepreresnetbc38b, sepreresnet50, sepreresnet50b, sepreresnet101, sepreresnet101b, sepreresnet152, sepreresnet152b, sepreresnet200, sepreresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet10 or weight_count == 5461668) assert (model != sepreresnet12 or weight_count == 5536232) assert (model != sepreresnet14 or weight_count == 5833840) assert (model != sepreresnet16 or weight_count == 7022976) assert (model != sepreresnet18 or weight_count == 11776928) assert (model != sepreresnet26 or weight_count == 18092188) assert (model != sepreresnetbc26b or weight_count == 17388424) assert (model != sepreresnet34 or weight_count == 21957204) assert (model != sepreresnetbc38b or weight_count == 24019064) assert (model != sepreresnet50 or weight_count == 28080472) assert (model != sepreresnet50b or weight_count == 28080472) assert (model != sepreresnet101 or weight_count == 49319320) assert (model != sepreresnet101b or weight_count == 49319320) assert (model != sepreresnet152 or weight_count == 66814296) assert (model != sepreresnet152b or weight_count == 66814296) assert (model != sepreresnet200 or weight_count == 71828312) assert (model != sepreresnet200b or weight_count == 71828312) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
18,104
31.739602
119
py
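Note: get_sepreresnet above asserts sum(layers) * 3 + 2 == blocks for bottleneck configurations and sum(layers) * 2 + 2 == blocks otherwise: each unit contributes three (or two) weighted layers, plus one for the initial convolution and one for the classifier. A quick illustrative check of that arithmetic for a few of the configurations listed in the file:

configs = {
    18: ([2, 2, 2, 2], False),
    50: ([3, 4, 6, 3], True),
    101: ([3, 4, 23, 3], True),
    152: ([3, 8, 36, 3], True),
}
for blocks, (layers, bottleneck) in configs.items():
    per_unit = 3 if bottleneck else 2
    # sum(layers) units, plus the init block conv and the final fc layer
    assert sum(layers) * per_unit + 2 == blocks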
imgclsmob
imgclsmob-master/keras_/kerascv/models/resnext.py
""" ResNeXt for ImageNet-1K, implemented in Keras. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['resnext', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d', 'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'resnext_bottleneck'] import os import math from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten from .resnet import res_init_block def resnext_bottleneck(x, in_channels, out_channels, strides, cardinality, bottleneck_width, bottleneck_factor=4, name="resnext_bottleneck"): """ ResNeXt bottleneck block for residual path in ResNeXt unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. bottleneck_factor : int, default 4 Bottleneck factor. name : str, default 'resnext_bottleneck' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = out_channels // bottleneck_factor D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D x = conv1x1_block( x=x, in_channels=in_channels, out_channels=group_width, name=name + "/conv1") x = conv3x3_block( x=x, in_channels=group_width, out_channels=group_width, strides=strides, groups=cardinality, name=name + "/conv2") x = conv1x1_block( x=x, in_channels=group_width, out_channels=out_channels, activation=None, name=name + "/conv3") return x def resnext_unit(x, in_channels, out_channels, strides, cardinality, bottleneck_width, name="resnext_unit"): """ ResNeXt unit with residual connection. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. name : str, default 'resnext_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = x x = resnext_bottleneck( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, name=name + "/body") x = nn.add([x, identity], name=name + "/add") activ = nn.Activation("relu", name=name + "/activ") x = activ(x) return x def resnext(channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): """ ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = res_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = resnext_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_resnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ if blocks == 14: layers = [1, 1, 1, 1] elif blocks == 26: layers = [2, 2, 2, 2] elif blocks == 38: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 2 == blocks) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = resnext( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnext14_16x4d(**kwargs): """ ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs) def resnext14_32x2d(**kwargs): """ ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs) def resnext14_32x4d(**kwargs): """ ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs) def resnext26_16x4d(**kwargs): """ ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs) def resnext26_32x2d(**kwargs): """ ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs) def resnext26_32x4d(**kwargs): """ ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs) def resnext38_32x4d(**kwargs): """ ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs) def resnext50_32x4d(**kwargs): """ ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs) def resnext101_32x4d(**kwargs): """ ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs) def resnext101_64x4d(**kwargs): """ ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_resnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ resnext14_16x4d, resnext14_32x2d, resnext14_32x4d, resnext26_16x4d, resnext26_32x2d, resnext26_32x4d, resnext38_32x4d, resnext50_32x4d, resnext101_32x4d, resnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext14_16x4d or weight_count == 7127336) assert (model != resnext14_32x2d or weight_count == 7029416) assert (model != resnext14_32x4d or weight_count == 9411880) assert (model != resnext26_16x4d or weight_count == 10119976) assert (model != resnext26_32x2d or weight_count == 9924136) assert (model != resnext26_32x4d or weight_count == 15389480) assert (model != resnext38_32x4d or weight_count == 21367080) assert (model != resnext50_32x4d or weight_count == 25028904) assert (model != resnext101_32x4d or weight_count == 44177704) assert (model != resnext101_64x4d or weight_count == 83455272) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
14,656
30.45279
119
py
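Note: resnext_bottleneck above derives the width of the grouped 3x3 convolution from cardinality and bottleneck_width. Working through the formula for the first stage (out_channels=256) shows why resnext14_16x4d and resnext14_32x2d report nearly identical parameter counts in _test: both settings yield the same group_width and differ only in how it is split into groups. The helper name below is illustrative.

import math

def group_width_sketch(out_channels, cardinality, bottleneck_width, bottleneck_factor=4):
    # Same arithmetic as resnext_bottleneck.
    mid_channels = out_channels // bottleneck_factor
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    return cardinality * D

assert group_width_sketch(256, 16, 4) == 64   # 16 groups of width 4
assert group_width_sketch(256, 32, 2) == 64   # 32 groups of width 2
assert group_width_sketch(256, 32, 4) == 128  # 32 groups of width 4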
imgclsmob
imgclsmob-master/keras_/kerascv/models/senet.py
""" SENet for ImageNet-1K, implemented in Keras. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['senet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154'] import os import math from keras import layers as nn from keras.models import Model from .common import conv1x1_block, conv3x3_block, se_block, is_channels_first, flatten def senet_bottleneck(x, in_channels, out_channels, strides, cardinality, bottleneck_width, name="senet_bottleneck"): """ SENet bottleneck block for residual path in SENet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. name : str, default 'senet_bottleneck' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D group_width2 = group_width // 2 x = conv1x1_block( x=x, in_channels=in_channels, out_channels=group_width2, name=name + "/conv1") x = conv3x3_block( x=x, in_channels=group_width2, out_channels=group_width, strides=strides, groups=cardinality, name=name + "/conv2") x = conv1x1_block( x=x, in_channels=group_width, out_channels=out_channels, activation=None, name=name + "/conv3") return x def senet_unit(x, in_channels, out_channels, strides, cardinality, bottleneck_width, identity_conv3x3, name="senet_unit"): """ SENet unit. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. identity_conv3x3 : bool, default False Whether to use 3x3 convolution in the identity link. name : str, default 'senet_unit' Unit name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. """ resize_identity = (in_channels != out_channels) or (strides != 1) if resize_identity: if identity_conv3x3: identity = conv3x3_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = conv1x1_block( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, name=name + "/identity_conv") else: identity = x x = senet_bottleneck( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, name=name + "/body") x = se_block( x=x, channels=out_channels, name=name + "/se") x = nn.add([x, identity], name=name + "/add") activ = nn.Activation("relu", name=name + "/activ") x = activ(x) return x def senet_init_block(x, in_channels, out_channels, name="senet_init_block"): """ SENet specific initial block. Parameters: ---------- x : keras.backend tensor/variable/symbol Input tensor/variable/symbol. in_channels : int Number of input channels. out_channels : int Number of output channels. name : str, default 'senet_init_block' Block name. Returns: ------- keras.backend tensor/variable/symbol Resulted tensor/variable/symbol. 
""" mid_channels = out_channels // 2 x = conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=2, name=name + "/conv1") x = conv3x3_block( x=x, in_channels=mid_channels, out_channels=mid_channels, name=name + "/conv2") x = conv3x3_block( x=x, in_channels=mid_channels, out_channels=out_channels, name=name + "/conv3") x = nn.MaxPool2D( pool_size=3, strides=2, padding='same', name=name + "/pool")(x) return x def senet(channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000): """ SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. """ input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\ (in_size[0], in_size[1], in_channels) input = nn.Input(shape=input_shape) x = senet_init_block( x=input, in_channels=in_channels, out_channels=init_block_channels, name="features/init_block") in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): identity_conv3x3 = (i != 0) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 x = senet_unit( x=x, in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, identity_conv3x3=identity_conv3x3, name="features/stage{}/unit{}".format(i + 1, j + 1)) in_channels = out_channels x = nn.AvgPool2D( pool_size=7, strides=1, name="features/final_pool")(x) # x = nn.Flatten()(x) x = flatten(x) x = nn.Dropout( rate=0.2, name="output/dropout")(x) x = nn.Dense( units=classes, input_dim=in_channels, name="output/fc")(x) model = Model(inputs=input, outputs=x) model.in_size = in_size model.classes = classes return model def get_senet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".keras", "models"), **kwargs): """ Create SENet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" if blocks == 16: layers = [1, 1, 1, 1] cardinality = 32 elif blocks == 28: layers = [2, 2, 2, 2] cardinality = 32 elif blocks == 40: layers = [3, 3, 3, 3] cardinality = 32 elif blocks == 52: layers = [3, 4, 6, 3] cardinality = 32 elif blocks == 103: layers = [3, 4, 23, 3] cardinality = 32 elif blocks == 154: layers = [3, 8, 36, 3] cardinality = 64 else: raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks)) bottleneck_width = 4 init_block_channels = 128 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = senet( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def senet16(**kwargs): """ SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_senet(blocks=16, model_name="senet16", **kwargs) def senet28(**kwargs): """ SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_senet(blocks=28, model_name="senet28", **kwargs) def senet40(**kwargs): """ SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_senet(blocks=40, model_name="senet40", **kwargs) def senet52(**kwargs): """ SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_senet(blocks=52, model_name="senet52", **kwargs) def senet103(**kwargs): """ SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_senet(blocks=103, model_name="senet103", **kwargs) def senet154(**kwargs): """ SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. 
""" return get_senet(blocks=154, model_name="senet154", **kwargs) def _test(): import numpy as np import keras pretrained = False models = [ senet16, senet28, senet40, senet52, senet103, senet154, ] for model in models: net = model(pretrained=pretrained) # net.summary() weight_count = keras.utils.layer_utils.count_params(net.trainable_weights) print("m={}, {}".format(model.__name__, weight_count)) assert (model != senet16 or weight_count == 31366168) assert (model != senet28 or weight_count == 36453768) assert (model != senet40 or weight_count == 41541368) assert (model != senet52 or weight_count == 44659416) assert (model != senet103 or weight_count == 60963096) assert (model != senet154 or weight_count == 115088984) if is_channels_first(): x = np.zeros((1, 3, 224, 224), np.float32) else: x = np.zeros((1, 224, 224, 3), np.float32) y = net.predict(x) assert (y.shape == (1, 1000)) if __name__ == "__main__": _test()
13,026
27.381264
115
py
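Note: senet_bottleneck above differs from resnext_bottleneck in that its first 1x1 convolution emits only half of the group width (group_width2 = group_width // 2), the distinctive SENet-154 bottleneck. An illustrative trace of the resulting widths for SENet-154 (cardinality=64, bottleneck_width=4) at a stage with out_channels=256:

import math

cardinality, bottleneck_width = 64, 4
out_channels = 256
mid_channels = out_channels // 4
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D    # width of the grouped 3x3 conv: 256
group_width2 = group_width // 2  # halved width of the first 1x1 conv: 128
assert (group_width, group_width2) == (256, 128)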