repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/dabnet.py
""" DABNet for image segmentation, implemented in TensorFlow. Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. """ __all__ = ['DABNet', 'dabnet_cityscapes'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\ DualPathSequential, SimpleSequential, is_channels_first, get_im_size, PReLU2, MaxPool2d, AvgPool2d, get_channel_axis class DwaConvBlock(nn.Layer): """ Depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, kernel_size, strides, padding, dilation=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(DwaConvBlock, self).__init__(**kwargs) self.conv1 = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(kernel_size, 1), strides=strides, padding=(padding, 0), dilation=(dilation, 1), groups=channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, name="conv1") self.conv2 = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(1, kernel_size), strides=strides, padding=(0, padding), dilation=(1, dilation), groups=channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x def dwa_conv3x3_block(channels, strides=1, padding=1, dilation=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 3x3 version of the depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. strides : int, default 1 Strides of the convolution. padding : int, default 1 Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return DwaConvBlock( channels=channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) class DABBlock(nn.Layer): """ DABNet specific base block. Parameters: ---------- channels : int Number of input/output channels. dilation : int Dilation value for a dilated branch in the unit. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, dilation, bn_eps, data_format="channels_last", **kwargs): super(DABBlock, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) mid_channels = channels // 2 self.norm_activ1 = NormActivation( in_channels=channels, bn_eps=bn_eps, activation=(lambda: PReLU2(channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ1") self.conv1 = conv3x3_block( in_channels=channels, out_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv1") self.branches = Concurrent( stack=True, data_format=data_format, name="branches") self.branches.add(dwa_conv3x3_block( channels=mid_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")), data_format=data_format, name="branches1")) self.branches.add(dwa_conv3x3_block( channels=mid_channels, padding=dilation, dilation=dilation, bn_eps=bn_eps, activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")), data_format=data_format, name="branches2")) self.norm_activ2 = NormActivation( in_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ2") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=channels, data_format=data_format, 
name="conv2") def call(self, x, training=None): identity = x x = self.norm_activ1(x, training=training) x = self.conv1(x, training=training) x = self.branches(x, training=training) x = tf.math.reduce_sum(x, axis=self.axis) x = self.norm_activ2(x, training=training) x = self.conv2(x) x = x + identity return x class DownBlock(nn.Layer): """ DABNet specific downsample block for the main branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(DownBlock, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) self.expand = (in_channels < out_channels) mid_channels = out_channels - in_channels if self.expand else out_channels self.conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, strides=2, data_format=data_format, name="conv") if self.expand: self.pool = MaxPool2d( pool_size=2, strides=2, data_format=data_format, name="pool") self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ") def call(self, x, training=None): y = self.conv(x) if self.expand: z = self.pool(x) y = tf.concat([y, z], axis=self.axis) y = self.norm_activ(y, training=training) return y class DABUnit(nn.Layer): """ DABNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilations : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, dilations, bn_eps, data_format="channels_last", **kwargs): super(DABUnit, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) mid_channels = out_channels // 2 self.down = DownBlock( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, data_format=data_format, name="down") self.blocks = SimpleSequential(name="blocks") for i, dilation in enumerate(dilations): self.blocks.add(DABBlock( channels=mid_channels, dilation=dilation, bn_eps=bn_eps, data_format=data_format, name="block{}".format(i + 1))) def call(self, x, training=None): x = self.down(x, training=training) y = self.blocks(x, training=training) x = tf.concat([y, x], axis=self.axis) return x class DABStage(nn.Layer): """ DABNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. dilations : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, dilations, bn_eps, data_format="channels_last", **kwargs): super(DABStage, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) self.use_unit = (len(dilations) > 0) self.x_down = AvgPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="x_down") if self.use_unit: self.unit = DABUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), dilations=dilations, bn_eps=bn_eps, data_format=data_format, name="unit") self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(y_out_channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ") def call(self, y, x, training=None): x = self.x_down(x) if self.use_unit: y = self.unit(y, training=training) y = tf.concat([y, x], axis=self.axis) y = self.norm_activ(y, training=training) return y, x class DABInitBlock(nn.Layer): """ DABNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(DABInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class DABNet(tf.keras.Model): """ DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. dilations : list of list of int Dilations for blocks. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, dilations, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, data_format="channels_last", **kwargs): super(DABNet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size self.data_format = data_format self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0, name="features") self.features.add(DABInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, data_format=data_format, name="init_block")) y_in_channels = init_block_channels for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)): self.features.add(DABStage( x_channels=in_channels, y_in_channels=y_in_channels, y_out_channels=y_out_channels, dilations=dilations_i, bn_eps=bn_eps, data_format=data_format, name="stage{}".format(i + 1))) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=classes, data_format=data_format, name="classifier") self.up = InterpolationBlock( scale_factor=8, data_format=data_format, name="up") def call(self, x, training=None): in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format) y = self.features(x, x, training=training) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_dabnet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DABNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" init_block_channels = 32 channels = [35, 131, 259] dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]] bn_eps = 1e-3 net = DABNet( channels=channels, init_block_channels=init_block_channels, dilations=dilations, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root), by_name=True, skip_mismatch=True) return net def dabnet_cityscapes(classes=19, **kwargs): """ DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_dabnet(classes=classes, model_name="dabnet_cityscapes", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False in_size = (1024, 2048) classes = 19 models = [ dabnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, data_format=data_format) batch = 4 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes, in_size[0], in_size[1]) if is_channels_first(data_format) else tuple(y.shape.as_list()) == (batch, in_size[0], in_size[1], classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dabnet_cityscapes or weight_count == 756643) if __name__ == "__main__": _test()
20,630
31.592417
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/cgnet.py
""" CGNet for image segmentation, implemented in TensorFlow. Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. """ __all__ = ['CGNet', 'cgnet_cityscapes'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\ DualPathSequential, InterpolationBlock, SimpleSequential, is_channels_first, get_im_size, PReLU2, AvgPool2d,\ get_channel_axis class CGBlock(nn.Layer): """ CGNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilation : int Dilation value. se_reduction : int SE-block reduction value. down : bool Whether to downsample. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, dilation, se_reduction, down, bn_eps, data_format="channels_last", **kwargs): super(CGBlock, self).__init__(**kwargs) self.down = down if self.down: mid1_channels = out_channels mid2_channels = 2 * out_channels else: mid1_channels = out_channels // 2 mid2_channels = out_channels if self.down: self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv1") else: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(mid1_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv1") self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(depthwise_conv3x3( channels=mid1_channels, data_format=data_format, name="branches1")) self.branches.add(depthwise_conv3x3( channels=mid1_channels, 
padding=dilation, dilation=dilation, data_format=data_format, name="branches2")) self.norm_activ = NormActivation( in_channels=mid2_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(mid2_channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ") if self.down: self.conv2 = conv1x1( in_channels=mid2_channels, out_channels=out_channels, data_format=data_format, name="conv2") self.se = SEBlock( channels=out_channels, reduction=se_reduction, use_conv=False, data_format=data_format, name="se") def call(self, x, training=None): if not self.down: identity = x x = self.conv1(x, training=training) x = self.branches(x, training=training) x = self.norm_activ(x, training=training) if self.down: x = self.conv2(x, training=training) x = self.se(x, training=training) if not self.down: x += identity return x class CGUnit(nn.Layer): """ CGNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. dilation : int Dilation value. se_reduction : int SE-block reduction value. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, layers, dilation, se_reduction, bn_eps, data_format="channels_last", **kwargs): super(CGUnit, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) mid_channels = out_channels // 2 self.down = CGBlock( in_channels=in_channels, out_channels=mid_channels, dilation=dilation, se_reduction=se_reduction, down=True, bn_eps=bn_eps, data_format=data_format, name="down") self.blocks = SimpleSequential(name="blocks") for i in range(layers - 1): self.blocks.add(CGBlock( in_channels=mid_channels, out_channels=mid_channels, dilation=dilation, se_reduction=se_reduction, down=False, bn_eps=bn_eps, data_format=data_format, name="block{}".format(i + 1))) def call(self, x, training=None): x = self.down(x, training=training) y = self.blocks(x, training=training) x = tf.concat([y, x], axis=self.axis) # NB: This differs from the original implementation. return x class CGStage(nn.Layer): """ CGNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. layers : int Number of layers in the unit. dilation : int Dilation for blocks. se_reduction : int SE-block reduction value for blocks. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, layers, dilation, se_reduction, bn_eps, data_format="channels_last", **kwargs): super(CGStage, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) self.use_x = (x_channels > 0) self.use_unit = (layers > 0) if self.use_x: self.x_down = AvgPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="x_down") if self.use_unit: self.unit = CGUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), layers=layers, dilation=dilation, se_reduction=se_reduction, bn_eps=bn_eps, data_format=data_format, name="unit") self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(y_out_channels, data_format=data_format, name="activ")), data_format=data_format, name="norm_activ") def call(self, y, x=None, training=None): if self.use_unit: y = self.unit(y, training=training) if self.use_x: x = self.x_down(x) y = tf.concat([y, x], axis=self.axis) y = self.norm_activ(y, training=training) return y, x class CGInitBlock(nn.Layer): """ CGNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(CGInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")), data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class CGNet(tf.keras.Model): """ CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. dilations : list of int Dilations for each unit. se_reductions : list of int SE-block reduction value for each unit. cut_x : list of int Whether to concatenate with x-branch for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, layers, channels, init_block_channels, dilations, se_reductions, cut_x, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, data_format="channels_last", **kwargs): super(CGNet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size self.data_format = data_format self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0, name="features") self.features.add(CGInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, data_format=data_format, name="init_block")) y_in_channels = init_block_channels for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)): self.features.add(CGStage( x_channels=in_channels if cut_x[i] == 1 else 0, y_in_channels=y_in_channels, y_out_channels=y_out_channels, layers=layers_i, dilation=dilations[i], se_reduction=se_reductions[i], bn_eps=bn_eps, data_format=data_format, name="stage{}".format(i + 1))) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=classes, data_format=data_format, name="classifier") self.up = InterpolationBlock( scale_factor=8, data_format=data_format, name="up") def call(self, x, training=None): in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format) y = self.features(x, x, training=training) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_cgnet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create CGNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ init_block_channels = 32 layers = [0, 3, 21] channels = [35, 131, 256] dilations = [0, 2, 4] se_reductions = [0, 8, 16] cut_x = [1, 1, 0] bn_eps = 1e-3 net = CGNet( layers=layers, channels=channels, init_block_channels=init_block_channels, dilations=dilations, se_reductions=se_reductions, cut_x=cut_x, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root), by_name=True, skip_mismatch=True) return net def cgnet_cityscapes(classes=19, **kwargs): """ CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_cgnet(classes=classes, model_name="cgnet_cityscapes", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False in_size = (1024, 2048) classes = 19 models = [ cgnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, data_format=data_format) batch = 4 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes, in_size[0], in_size[1]) if is_channels_first(data_format) else tuple(y.shape.as_list()) == (batch, in_size[0], in_size[1], classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != cgnet_cityscapes or weight_count == 496306) if __name__ == "__main__": _test()
16,751
31.528155
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/fbnet.py
""" FBNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. """ __all__ = ['FBNet', 'fbnet_cb'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SimpleSequential, flatten,\ is_channels_first class FBNetUnit(nn.Layer): """ FBNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. bn_eps : float Small float added to variance in Batch norm. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int Expansion factor for each unit. activation : str, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, bn_eps, use_kernel3, exp_factor, activation="relu", data_format="channels_last", **kwargs): super(FBNetUnit, self).__init__(**kwargs) assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (strides == 1) self.use_exp_conv = True mid_channels = exp_factor * in_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation, data_format=data_format, name="exp_conv") if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, bn_eps=bn_eps, activation=activation, data_format=data_format, name="conv1") else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, bn_eps=bn_eps, activation=activation, data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x, training=training) x = self.conv1(x, training=training) x = self.conv2(x, training=training) if self.residual: x = x + identity return x class FBNetInitBlock(nn.Layer): """ FBNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(FBNetInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, bn_eps=bn_eps, data_format=data_format, name="conv1") self.conv2 = FBNetUnit( in_channels=out_channels, out_channels=out_channels, strides=1, bn_eps=bn_eps, use_kernel3=True, exp_factor=1, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class FBNet(tf.keras.Model): """ FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, bn_eps=1e-5, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(FBNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(FBNetInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] stage.add(FBNetUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bn_eps=bn_eps, use_kernel3=use_kernel3, exp_factor=exp_factor, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, data_format=data_format, name="final_block")) in_channels = final_block_channels self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_fbnet(version, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create FBNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('a', 'b' or 'c'). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if version == "c": init_block_channels = 16 final_block_channels = 1984 channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]] kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]] exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]] else: raise ValueError("Unsupported FBNet version {}".format(version)) net = FBNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def fbnet_cb(**kwargs): """ FBNet-Cb model (bn_eps=1e-3) from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_fbnet(version="c", bn_eps=1e-3, model_name="fbnet_cb", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ fbnet_cb, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fbnet_cb or weight_count == 5572200) if __name__ == "__main__": _test()
11,383
32.581121
116
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/visemenet.py
""" VisemeNet for speech-driven facial animation, implemented in TensorFlow. Original paper: 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. """ __all__ = ['VisemeNet', 'visemenet20'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import DenseBlock, SimpleSequential class VisemeDenseBranch(tf.keras.Model): """ VisemeNet dense branch. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of middle/output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels_list, data_format="channels_last", **kwargs): super(VisemeDenseBranch, self).__init__(**kwargs) self.branch = SimpleSequential(name="branch") for i, out_channels in enumerate(out_channels_list[:-1]): self.branch.add(DenseBlock( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=True, data_format=data_format, name="block{}".format(i + 1))) in_channels = out_channels self.final_fc = nn.Dense( units=out_channels_list[-1], input_dim=in_channels, name="final_fc") def call(self, x, training=None): x = self.branch(x, training=training) y = self.final_fc(x) return y, x class VisemeRnnBranch(nn.Layer): """ VisemeNet RNN branch. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of middle/output channels. rnn_num_layers : int Number of RNN layers. dropout_rate : float Dropout rate. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels_list, rnn_num_layers, dropout_rate, data_format="channels_last", **kwargs): super(VisemeRnnBranch, self).__init__(**kwargs) assert (in_channels is not None) self.rnn = nn.RNN([nn.LSTMCell( units=out_channels_list[0], dropout=dropout_rate, name="rnn{}".format(i + 1) ) for i in range(rnn_num_layers)]) self.fc_branch = VisemeDenseBranch( in_channels=out_channels_list[0], out_channels_list=out_channels_list[1:], data_format=data_format, name="fc_branch") def call(self, x, training=None): x = self.rnn(x, training=training) # x = x[:, -1, :] y, _ = self.fc_branch(x, training=training) return y class VisemeNet(tf.keras.Model): """ VisemeNet model from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. Parameters: ---------- audio_features : int, default 195 Number of audio features (characters/sounds). audio_window_size : int, default 8 Size of audio window (for time related audio features). stage2_window_size : int, default 64 Size of window for stage #2. num_face_ids : int, default 76 Number of face IDs. num_landmarks : int, default 76 Number of landmarks. num_phonemes : int, default 21 Number of phonemes. num_visemes : int, default 20 Number of visemes. dropout_rate : float, default 0.5 Dropout rate for RNNs. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, audio_features=195, audio_window_size=8, stage2_window_size=64, num_face_ids=76, num_landmarks=76, num_phonemes=21, num_visemes=20, dropout_rate=0.5, data_format="channels_last", **kwargs): super(VisemeNet, self).__init__(**kwargs) stage1_rnn_hidden_size = 256 stage1_fc_mid_channels = 256 stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \ stage2_window_size // audio_window_size self.audio_window_size = audio_window_size self.stage2_window_size = stage2_window_size self.stage1_rnn = nn.RNN([nn.LSTMCell( units=stage1_rnn_hidden_size, dropout=dropout_rate, name="stage1_rnn{}".format(i + 1) ) for i in range(3)]) self.lm_branch = VisemeDenseBranch( in_channels=(stage1_rnn_hidden_size + num_face_ids), out_channels_list=[stage1_fc_mid_channels, num_landmarks], data_format=data_format, name="lm_branch") self.ph_branch = VisemeDenseBranch( in_channels=(stage1_rnn_hidden_size + num_face_ids), out_channels_list=[stage1_fc_mid_channels, num_phonemes], data_format=data_format, name="ph_branch") self.cls_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[256, 200, num_visemes], rnn_num_layers=1, dropout_rate=dropout_rate, data_format=data_format, name="cls_branch") self.reg_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[256, 200, 100, num_visemes], rnn_num_layers=3, dropout_rate=dropout_rate, data_format=data_format, name="reg_branch") self.jali_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[128, 200, 2], rnn_num_layers=3, dropout_rate=dropout_rate, data_format=data_format, name="jali_branch") def call(self, x, pid, training=None): y = self.stage1_rnn(x, training=training) # y = y[:, -1, :] y = tf.concat([y, tf.cast(pid, tf.float32)], axis=1) lm, _ = self.lm_branch(y, training=training) lm += tf.cast(pid, tf.float32) ph, ph1 = self.ph_branch(y, training=training) z = tf.concat([lm, ph1], axis=1) z2 = tf.concat([z, x[:, 
self.audio_window_size // 2, :]], axis=1) n_net2_input = z2.shape[1] z2 = tf.concat([tf.zeros((self.stage2_window_size // 2, n_net2_input)), z2], axis=0) z = tf.stack( [tf.reshape( z2[i:i + self.stage2_window_size], shape=(self.audio_window_size, n_net2_input * self.stage2_window_size // self.audio_window_size)) for i in range(z2.shape[0] - self.stage2_window_size)], axis=0) cls = self.cls_branch(z, training=training) reg = self.reg_branch(z, training=training) jali = self.jali_branch(z, training=training) return cls, reg, jali def get_visemenet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create VisemeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ net = VisemeNet( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def visemenet20(**kwargs): """ VisemeNet model for 20 visemes (without co-articulation rules) from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_visemenet(model_name="visemenet20", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ visemenet20, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 34 audio_window_size = 8 audio_features = 195 num_face_ids = 76 num_visemes = 20 x = tf.random.normal((batch, audio_window_size, audio_features)) pid = tf.fill(dims=(batch, num_face_ids), value=3) y1, y2, y3 = net(x, pid) assert (y1.shape[0] == y2.shape[0] == y3.shape[0]) assert (y1.shape[-1] == y2.shape[-1] == num_visemes) assert (y3.shape[-1] == 2) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) # assert (model != visemenet20 or weight_count == 14574303) assert (model != visemenet20 or weight_count == 14565599) print(net.summary()) if __name__ == "__main__": _test()
10,166
33.11745
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/mobilenetv3.py
""" MobileNetV3 for ImageNet-1K, implemented in TensorFlow. Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ __all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4', 'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2', 'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\ HSwish, SimpleSequential, flatten, is_channels_first class MobileNetV3Unit(nn.Layer): """ MobileNetV3 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. activation : str Activation function or name of activation function. use_se : bool Whether to use SE-module. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, exp_channels, strides, use_kernel3, activation, use_se, data_format="channels_last", **kwargs): super(MobileNetV3Unit, self).__init__(**kwargs) assert (exp_channels >= out_channels) self.residual = (in_channels == out_channels) and (strides == 1) self.use_se = use_se self.use_exp_conv = exp_channels != out_channels mid_channels = exp_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation, data_format=data_format, name="exp_conv") if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, data_format=data_format, name="conv1") else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, data_format=data_format, name="conv1") if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, round_mid=True, out_activation="hsigmoid", data_format=data_format, name="se") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x, training=training) x = self.conv1(x, training=training) if self.use_se: x = self.se(x) x = self.conv2(x, training=training) if self.residual: x = x + identity return x class MobileNetV3FinalBlock(nn.Layer): """ MobileNetV3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_se : bool Whether to use SE-module. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, use_se, data_format="channels_last", **kwargs): super(MobileNetV3FinalBlock, self).__init__(**kwargs) self.use_se = use_se self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation="hswish", data_format=data_format, name="conv") if self.use_se: self.se = SEBlock( channels=out_channels, reduction=4, round_mid=True, out_activation="hsigmoid", data_format=data_format, name="se") def call(self, x, training=None): x = self.conv(x, training=training) if self.use_se: x = self.se(x) return x class MobileNetV3Classifier(nn.Layer): """ MobileNetV3 classifier. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, mid_channels, dropout_rate, data_format="channels_last", **kwargs): super(MobileNetV3Classifier, self).__init__(**kwargs) self.use_dropout = (dropout_rate != 0.0) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.activ = HSwish() if self.use_dropout: self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x) x = self.activ(x) if self.use_dropout: x = self.dropout(x, training=training) x = self.conv2(x) return x class MobileNetV3(tf.keras.Model): """ MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- channels : list of list of int Number of output channels for each unit. exp_channels : list of list of int Number of middle (expanded) channels for each unit. 
init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. use_relu : list of list of int/bool Using ReLU activation flag for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. final_use_se : bool Whether to use SE-module in the final block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(MobileNetV3, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, strides=2, activation="hswish", data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): exp_channels_ij = exp_channels[i][j] strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 activation = "relu" if use_relu[i][j] == 1 else "hswish" use_se_flag = use_se[i][j] == 1 stage.add(MobileNetV3Unit( in_channels=in_channels, out_channels=out_channels, 
exp_channels=exp_channels_ij, use_kernel3=use_kernel3, strides=strides, activation=activation, use_se=use_se_flag, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(MobileNetV3FinalBlock( in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se, data_format=data_format, name="final_block")) in_channels = final_block_channels self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = MobileNetV3Classifier( in_channels=in_channels, out_channels=classes, mid_channels=classifier_mid_channels, dropout_rate=0.2, data_format=data_format, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x, training=training) x = flatten(x, self.data_format) return x def get_mobilenetv3(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create MobileNetV3 model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('small' or 'large'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if version == "small": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]] exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]] first_stride = True final_block_channels = 576 elif version == "large": init_block_channels = 16 channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]] kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]] use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]] first_stride = False final_block_channels = 960 else: raise ValueError("Unsupported MobileNetV3 version {}".format(version)) final_use_se = False classifier_mid_channels = 1280 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels] init_block_channels = round_channels(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale) net = MobileNetV3( channels=channels, exp_channels=exp_channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, use_relu=use_relu, use_se=use_se, first_stride=first_stride, final_use_se=final_use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + 
net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def mobilenetv3_small_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_small_wd2(**kwargs): """ MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs) def mobilenetv3_small_w3d4(**kwargs): """ MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs) def mobilenetv3_small_w1(**kwargs): """ MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs) def mobilenetv3_small_w5d4(**kwargs): """ MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs) def mobilenetv3_large_w7d20(**kwargs): """ MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs) def mobilenetv3_large_wd2(**kwargs): """ MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs) def mobilenetv3_large_w3d4(**kwargs): """ MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs) def mobilenetv3_large_w1(**kwargs): """ MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs) def mobilenetv3_large_w5d4(**kwargs): """ MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ mobilenetv3_small_w7d20, mobilenetv3_small_wd2, mobilenetv3_small_w3d4, mobilenetv3_small_w1, mobilenetv3_small_w5d4, mobilenetv3_large_w7d20, mobilenetv3_large_wd2, mobilenetv3_large_w3d4, mobilenetv3_large_w1, mobilenetv3_large_w5d4, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv3_small_w7d20 or weight_count == 2159600) assert (model != mobilenetv3_small_wd2 or weight_count == 2288976) assert (model != mobilenetv3_small_w3d4 or weight_count == 2581312) assert (model != mobilenetv3_small_w1 or 
weight_count == 2945288) assert (model != mobilenetv3_small_w5d4 or weight_count == 3643632) assert (model != mobilenetv3_large_w7d20 or weight_count == 2943080) assert (model != mobilenetv3_large_wd2 or weight_count == 3334896) assert (model != mobilenetv3_large_w3d4 or weight_count == 4263496) assert (model != mobilenetv3_large_w1 or weight_count == 5481752) assert (model != mobilenetv3_large_w5d4 or weight_count == 7459144) if __name__ == "__main__": _test()
20,951
34.572156
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/lffd.py
""" LFFD for face detection, implemented in TensorFlow. Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. """ __all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent,\ is_channels_first from .resnet import ResUnit from .preresnet import PreResUnit class LffdDetectionBranch(nn.Layer): """ LFFD specific detection branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, use_bias, use_bn, data_format="channels_last", **kwargs): super(LffdDetectionBranch, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=in_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class LffdDetectionBlock(nn.Layer): """ LFFD specific detection block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. use_bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid_channels, use_bias, use_bn, data_format="channels_last", **kwargs): super(LffdDetectionBlock, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv") self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(LffdDetectionBranch( in_channels=mid_channels, out_channels=4, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="bbox_branch")) self.branches.add(LffdDetectionBranch( in_channels=mid_channels, out_channels=2, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="score_branch")) def call(self, x, training=None): x = self.conv(x, training=training) x = self.branches(x, training=training) return x class LFFD(tf.keras.Model): """ LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- enc_channels : list of int Number of output channels for each encoder stage. dec_channels : int Number of output channels for each decoder stage. init_block_channels : int Number of output channels for the initial encoder unit. layers : list of int Number of units in each encoder stage. int_bends : list of int Number of internal bends for each encoder stage. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (640, 640) Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, enc_channels, dec_channels, init_block_channels, layers, int_bends, use_preresnet, in_channels=3, in_size=(640, 640), data_format="channels_last", **kwargs): super(LFFD, self).__init__(**kwargs) self.in_size = in_size self.data_format = data_format unit_class = PreResUnit if use_preresnet else ResUnit use_bias = True use_bn = False self.encoder = MultiOutputSequential(return_last=False) self.encoder.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, strides=2, padding=0, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] stage = MultiOutputSequential(multi_output=False, dual_output=True, name="stage{}".format(i + 1)) stage.add(conv3x3( in_channels=in_channels, out_channels=channels_per_stage, strides=2, padding=0, use_bias=use_bias, data_format=data_format, name="trans{}".format(i + 1))) for j in range(layers_per_stage): unit = unit_class( in_channels=channels_per_stage, out_channels=channels_per_stage, strides=1, use_bias=use_bias, use_bn=use_bn, bottleneck=False, data_format=data_format, name="unit{}".format(j + 1)) if layers_per_stage - j <= int_bends_per_stage: unit.do_output = True stage.add(unit) final_activ = nn.ReLU(name="final_activ") final_activ.do_output = True stage.add(final_activ) stage.do_output2 = True in_channels = channels_per_stage self.encoder.add(stage) self.decoder = ParallelConcurent() k = 0 for i, channels_per_stage in enumerate(enc_channels): layers_per_stage = layers[i] int_bends_per_stage = int_bends[i] for j in range(layers_per_stage): if layers_per_stage - j <= int_bends_per_stage: self.decoder.add(LffdDetectionBlock( in_channels=channels_per_stage, mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(k + 1))) k += 1 self.decoder.add(LffdDetectionBlock( 
in_channels=channels_per_stage, mid_channels=dec_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(k + 1))) k += 1 def call(self, x, training=None): x = self.encoder(x, training=training) x = self.decoder(x, training=training) return x def get_lffd(blocks, use_preresnet, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create LFFD model with specific parameters. Parameters: ---------- blocks : int Number of blocks. use_preresnet : bool Whether to use PreResnet backbone instead of ResNet. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if blocks == 20: layers = [3, 1, 1, 1, 1] enc_channels = [64, 64, 64, 128, 128] int_bends = [0, 0, 0, 0, 0] elif blocks == 25: layers = [4, 2, 1, 3] enc_channels = [64, 64, 128, 128] int_bends = [1, 1, 0, 2] else: raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks)) dec_channels = 128 init_block_channels = 64 net = LFFD( enc_channels=enc_channels, dec_channels=dec_channels, init_block_channels=init_block_channels, layers=layers, int_bends=int_bends, use_preresnet=use_preresnet, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def lffd20x5s320v2_widerface(**kwargs): """ LFFD-320-20L-5S-V2 model for WIDER FACE from 
'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs) def lffd25x8s560v1_widerface(**kwargs): """ LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" in_size = (640, 640) pretrained = False models = [ (lffd20x5s320v2_widerface, 5), (lffd25x8s560v1_widerface, 8), ] for model, num_outs in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (len(y) == num_outs) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != lffd20x5s320v2_widerface or weight_count == 1520606) assert (model != lffd25x8s560v1_widerface or weight_count == 2290608) if __name__ == "__main__": _test()
12,116
32.658333
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/sepreresnet.py
""" SE-PreResNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18', 'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b', 'SEPreResUnit'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, SEBlock, SimpleSequential, flatten from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation class SEPreResUnit(nn.Layer): """ SE-PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, bottleneck, conv1_stride, data_format="channels_last", **kwargs): super(SEPreResUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, data_format=data_format, name="body") else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") self.se = SEBlock( channels=out_channels, data_format=data_format, name="se") if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="identity_conv") def call(self, x, training=None): identity = x x, x_pre_activ = self.body(x, training=training) x = self.se(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class SEPreResNet(tf.keras.Model): """ SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(SEPreResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(SEPreResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(PreResActivation( in_channels=in_channels, data_format=data_format, name="final_block")) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_sepreresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create SE-PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( 
filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def sepreresnet10(**kwargs): """ SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs) def sepreresnet12(**kwargs): """ SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs) def sepreresnet14(**kwargs): """ SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs) def sepreresnet16(**kwargs): """ SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs) def sepreresnet18(**kwargs): """ SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs) def sepreresnet26(**kwargs): """ SE-PreResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs) def sepreresnetbc26b(**kwargs): """ SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs) def sepreresnet34(**kwargs): """ SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs) def sepreresnetbc38b(**kwargs): """ SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs) def sepreresnet50(**kwargs): """ SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs) def sepreresnet50b(**kwargs): """ SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs) def sepreresnet101(**kwargs): """ SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs) def sepreresnet101b(**kwargs): """ SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs) def sepreresnet152(**kwargs): """ SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs) def sepreresnet152b(**kwargs): """ SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs) def sepreresnet200(**kwargs): """ SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs) def sepreresnet200b(**kwargs): """ SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ sepreresnet10, sepreresnet12, sepreresnet14, sepreresnet16, sepreresnet18, sepreresnet26, sepreresnetbc26b, sepreresnet34, sepreresnetbc38b, sepreresnet50, sepreresnet50b, sepreresnet101, sepreresnet101b, sepreresnet152, sepreresnet152b, sepreresnet200, sepreresnet200b, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet10 or weight_count == 5461668) assert (model != sepreresnet12 or weight_count == 5536232) assert (model != sepreresnet14 or weight_count == 5833840) assert (model != sepreresnet16 or weight_count == 7022976) assert (model != sepreresnet18 or weight_count == 11776928) assert (model != sepreresnet26 or weight_count == 18092188) assert (model != sepreresnetbc26b or weight_count == 17388424) assert (model != sepreresnet34 or weight_count == 21957204) assert (model != sepreresnetbc38b or weight_count == 24019064) assert (model != sepreresnet50 or weight_count == 28080472) assert (model != sepreresnet50b or weight_count == 28080472) assert (model != sepreresnet101 or weight_count == 49319320) assert (model != sepreresnet101b or weight_count == 49319320) assert (model != sepreresnet152 or weight_count == 66814296) assert (model != sepreresnet152b or weight_count == 66814296) assert (model != sepreresnet200 or weight_count == 71828312) assert (model != sepreresnet200b or weight_count == 71828312) if __name__ == "__main__": _test()
19,413
33.361062
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/resnext.py
""" ResNeXt for ImageNet-1K, implemented in TensorFlow. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d', 'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'ResNeXtBottleneck', 'ResNeXtUnit'] import os import math import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten from .resnet import ResInitBlock class ResNeXtBottleneck(nn.Layer): """ ResNeXt bottleneck block for residual path in ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. bottleneck_factor : int, default 4 Bottleneck factor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, cardinality, bottleneck_width, bottleneck_factor=4, data_format="channels_last", **kwargs): super(ResNeXtBottleneck, self).__init__(**kwargs) mid_channels = out_channels // bottleneck_factor D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=group_width, out_channels=group_width, strides=strides, groups=cardinality, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class ResNeXtUnit(nn.Layer): """ ResNeXt unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, cardinality, bottleneck_width, data_format="channels_last", **kwargs): super(ResNeXtUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = ResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class ResNeXt(tf.keras.Model): """ ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(ResNeXt, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_resnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 14: layers = [1, 1, 1, 1] elif blocks == 26: layers = [2, 2, 2, 2] elif blocks == 38: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 2 == blocks) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def resnext14_16x4d(**kwargs): """ ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs) def resnext14_32x2d(**kwargs): """ ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs) def resnext14_32x4d(**kwargs): """ ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs) def resnext26_16x4d(**kwargs): """ ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs) def resnext26_32x2d(**kwargs): """ ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs) def resnext26_32x4d(**kwargs): """ ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs) def resnext38_32x4d(**kwargs): """ ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs) def resnext50_32x4d(**kwargs): """ ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs) def resnext101_32x4d(**kwargs): """ ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs) def resnext101_64x4d(**kwargs): """ ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ resnext14_16x4d, resnext14_32x2d, resnext14_32x4d, resnext26_16x4d, resnext26_32x2d, resnext26_32x4d, resnext38_32x4d, resnext50_32x4d, resnext101_32x4d, resnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext14_16x4d or weight_count == 7127336) assert (model != resnext14_32x2d or weight_count == 7029416) assert (model != resnext14_32x4d or weight_count == 9411880) assert (model != resnext26_16x4d or weight_count == 10119976) assert (model != resnext26_32x2d or weight_count == 9924136) assert (model != resnext26_32x4d or weight_count == 15389480) assert (model != resnext38_32x4d or weight_count == 21367080) assert (model != resnext50_32x4d or weight_count == 25028904) assert (model != resnext101_32x4d or weight_count == 44177704) assert (model != resnext101_64x4d or weight_count == 83455272) if __name__ == "__main__": _test()
16,041
32.560669
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/jasper.py
""" Jasper/DR for ASR, implemented in TensorFlow. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader', 'NemoMelSpecExtractor', 'CtcDecoder'] import os import numpy as np import tensorflow as tf import tensorflow.keras.layers as nn from tensorflow.python.keras import initializers from tensorflow.python.keras.engine.input_spec import InputSpec from .common import get_activation_layer, Conv1d, BatchNorm, DualPathSequential, DualPathParallelConcurent,\ is_channels_first class NemoAudioReader(object): """ Audio Reader from NVIDIA NEMO toolkit. Parameters: ---------- desired_audio_sample_rate : int, default 16000 Desired audio sample rate. trunc_value : int or None, default None Value to truncate. """ def __init__(self, desired_audio_sample_rate=16000): super(NemoAudioReader, self).__init__() self.desired_audio_sample_rate = desired_audio_sample_rate def read_from_file(self, audio_file_path): """ Read audio from file. Parameters: ---------- audio_file_path : str Path to audio file. Returns: ------- np.array Audio data. """ from soundfile import SoundFile with SoundFile(audio_file_path, "r") as data: sample_rate = data.samplerate audio_data = data.read(dtype="float32") audio_data = audio_data.transpose() if sample_rate != self.desired_audio_sample_rate: from librosa.core import resample as lr_resample audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate) if audio_data.ndim >= 2: audio_data = np.mean(audio_data, axis=1) return audio_data def read_from_files(self, audio_file_paths): """ Read audios from files. Parameters: ---------- audio_file_paths : list of str Paths to audio files. Returns: ------- list of np.array Audio data. 
""" assert (type(audio_file_paths) in (list, tuple)) audio_data_list = [] for audio_file_path in audio_file_paths: audio_data = self.read_from_file(audio_file_path) audio_data_list.append(audio_data) return audio_data_list class NemoMelSpecExtractor(nn.Layer): """ Mel-Spectrogram Extractor from NVIDIA NEMO toolkit. Parameters: ---------- sample_rate : int, default 16000 Sample rate of the input audio data. window_size_sec : float, default 0.02 Size of window for FFT in seconds. window_stride_sec : float, default 0.01 Stride of window for FFT in seconds. n_fft : int, default 512 Length of FT window. n_filters : int, default 64 Number of Mel spectrogram freq bins. preemph : float, default 0.97 Amount of pre emphasis to add to audio. dither : float, default 1.0e-05 Amount of white-noise dithering. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, sample_rate=16000, window_size_sec=0.02, window_stride_sec=0.01, n_fft=512, n_filters=64, preemph=0.97, dither=1.0e-05, data_format="channels_last", **kwargs): super(NemoMelSpecExtractor, self).__init__(**kwargs) self.data_format = data_format self.log_zero_guard_value = 2 ** -24 win_length = int(window_size_sec * sample_rate) self.hop_length = int(window_stride_sec * sample_rate) self.n_filters = n_filters from scipy import signal as scipy_signal from librosa import stft as librosa_stft window_arr = scipy_signal.hann(win_length, sym=True) self.stft = lambda x: librosa_stft( x, n_fft=n_fft, hop_length=self.hop_length, win_length=win_length, window=window_arr, center=True) self.window_arr_shape = window_arr.shape self.dither = dither self.preemph = preemph self.pad_align = 16 from librosa.filters import mel as librosa_mel self.fb_arr = librosa_mel( sample_rate, n_fft, n_mels=n_filters, fmin=0, fmax=(sample_rate / 2)) def build(self, input_shape): self.window = self.add_weight( shape=self.window_arr_shape, name="window", initializer=initializers.get("zeros"), 
regularizer=None, constraint=None, dtype=self.dtype, trainable=False) self.fb = self.add_weight( shape=np.expand_dims(self.fb_arr, axis=0).shape, name="fb", initializer=initializers.get("zeros"), regularizer=None, constraint=None, dtype=self.dtype, trainable=False) channel_axis = (1 if is_channels_first(self.data_format) else len(input_shape) - 1) axes = {} for i in range(1, len(input_shape)): if i != channel_axis: axes[i] = input_shape[i] self.input_spec = InputSpec(ndim=len(input_shape), axes=axes) self.built = True def call(self, x, training=None): xs = x.numpy() x_eps = 1e-5 batch = len(xs) y_len = np.zeros((batch,), dtype=np.long) ys = [] for i, xi in enumerate(xs): y_len[i] = np.ceil(float(len(xi)) / self.hop_length).astype(np.long) if self.dither > 0: xi += self.dither * np.random.randn(*xi.shape) xi = np.concatenate((xi[:1], xi[1:] - self.preemph * xi[:-1]), axis=0) yi = self.stft(xi) yi = np.abs(yi) yi = np.square(yi) yi = np.matmul(self.fb_arr, yi) yi = np.log(yi + self.log_zero_guard_value) assert (yi.shape[1] != 1) yi_mean = yi.mean(axis=1) yi_std = yi.std(axis=1) yi_std += x_eps yi = (yi - np.expand_dims(yi_mean, axis=-1)) / np.expand_dims(yi_std, axis=-1) ys.append(yi) channels = ys[0].shape[0] x_len_max = max([yj.shape[-1] for yj in ys]) y = np.zeros((batch, channels, x_len_max), dtype=np.float32) for i, yi in enumerate(ys): x_len_i = y_len[i] y[i, :, :x_len_i] = yi[:, :x_len_i] pad_rem = x_len_max % self.pad_align if pad_rem != 0: y = np.pad(y, ((0, 0), (0, 0), (0, self.pad_align - pad_rem))) if not is_channels_first(self.data_format): y = y.swapaxes(1, 2) x = tf.convert_to_tensor(y) x_len = tf.convert_to_tensor(y_len) return x, x_len def calc_flops(self, x): assert (x.shape[0] == 1) num_flops = x[0].size num_macs = 0 return num_flops, num_macs class CtcDecoder(object): """ CTC decoder (to decode a sequence of labels to words). Parameters: ---------- vocabulary : list of str Vocabulary of the dataset. 
""" def __init__(self, vocabulary): super().__init__() self.blank_id = len(vocabulary) self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))]) def __call__(self, predictions): """ Decode a sequence of labels to words. Parameters: ---------- predictions : np.array of int or list of list of int Tensor with predicted labels. Returns: ------- list of str Words. """ hypotheses = [] for prediction in predictions: decoded_prediction = [] previous = self.blank_id for p in prediction: if (p != previous or previous == self.blank_id) and p != self.blank_id: decoded_prediction.append(p) previous = p hypothesis = "".join([self.labels_map[c] for c in decoded_prediction]) hypotheses.append(hypothesis) return hypotheses def conv1d1(in_channels, out_channels, strides=1, groups=1, use_bias=False, data_format="channels_last", **kwargs): """ 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, groups=groups, use_bias=use_bias, data_format=data_format, **kwargs) class MaskConv1d(Conv1d): """ Masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 1 int Convolution window size. strides : int or tuple/list of 1 int Strides of the convolution. padding : int or tuple/list of 1 int, default 0 Padding value for convolution layer. dilation : int or tuple/list of 1 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. 
use_bias : bool, default False Whether the layer uses a bias vector. use_mask : bool, default True Whether to use mask. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding=0, dilation=1, groups=1, use_bias=False, use_mask=True, data_format="channels_last", **kwargs): super(MaskConv1d, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, **kwargs) self.use_mask = use_mask self.data_format = data_format if self.use_mask: self.kernel_size = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size self.strides = strides[0] if isinstance(strides, (list, tuple)) else strides self.padding = padding[0] if isinstance(padding, (list, tuple)) else padding self.dilation = dilation[0] if isinstance(dilation, (list, tuple)) else dilation def call(self, x, x_len): if self.use_mask: if is_channels_first(self.data_format): max_len = x.shape[2] mask = tf.expand_dims(tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64), 0) <\ tf.expand_dims(x_len, -1) mask = tf.broadcast_to(tf.expand_dims(mask, 1), x.shape) x = tf.where(mask, x, tf.zeros(x.shape)) else: max_len = x.shape[1] mask = tf.expand_dims(tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64), 0) <\ tf.expand_dims(x_len, -1) mask = tf.broadcast_to(tf.expand_dims(mask, -1), x.shape) x = tf.where(mask, x, tf.zeros(x.shape)) x_len = (x_len + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) // self.strides + 1 x = super(MaskConv1d, self).call(x) return x, x_len def mask_conv1d1(in_channels, out_channels, strides=1, groups=1, use_bias=False, data_format="channels_last", **kwargs): """ Masked 1-dim kernel version of the 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. strides : int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return MaskConv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, groups=groups, use_bias=use_bias, data_format=data_format, **kwargs) class MaskConvBlock1d(nn.Layer): """ Masked 1D convolution block with batch normalization, activation, and dropout. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. strides : int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", dropout_rate=0.0, data_format="channels_last", **kwargs): super(MaskConvBlock1d, self).__init__(**kwargs) self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) self.conv = MaskConv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, name="conv") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") if self.use_dropout: self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") def call(self, x, x_len, training=None): x, x_len = self.conv(x, x_len) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x, training=training) return x, x_len def mask_conv1d1_block(in_channels, out_channels, strides=1, padding=0, data_format="channels_last", **kwargs): """ 1-dim kernel version of the masked 1D convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int, default 1 Strides of the convolution. padding : int, default 0 Padding value for convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return MaskConvBlock1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=padding, data_format=data_format, **kwargs) class ChannelShuffle1d(nn.Layer): """ 1D version of the channel shuffle layer. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, groups, data_format="channels_last", **kwargs): super(ChannelShuffle1d, self).__init__(**kwargs) assert (channels % groups == 0) self.groups = groups self.data_format = data_format def call(self, x, training=None): x_shape = x.get_shape().as_list() if is_channels_first(self.data_format): channels = x_shape[1] seq_len = x_shape[2] else: seq_len = x_shape[1] channels = x_shape[2] assert (channels % self.groups == 0) channels_per_group = channels // self.groups if is_channels_first(self.data_format): x = tf.reshape(x, shape=(-1, self.groups, channels_per_group, seq_len)) x = tf.transpose(x, perm=(0, 2, 1, 3)) x = tf.reshape(x, shape=(-1, channels, seq_len)) else: x = tf.reshape(x, shape=(-1, seq_len, self.groups, channels_per_group)) x = tf.transpose(x, perm=(0, 1, 3, 2)) x = tf.reshape(x, shape=(-1, seq_len, channels)) return x def __repr__(self): s = "{name}(groups={groups})" return s.format( name=self.__class__.__name__, groups=self.groups) class DwsConvBlock1d(nn.Layer): """ Depthwise version of the 1D standard convolution block with batch normalization, activation, dropout, and channel shuffle. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. strides : int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", dropout_rate=0.0, data_format="channels_last", **kwargs): super(DwsConvBlock1d, self).__init__(**kwargs) self.activate = (activation is not None) self.use_bn = use_bn self.use_dropout = (dropout_rate != 0.0) self.use_channel_shuffle = (groups > 1) self.dw_conv = MaskConv1d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=in_channels, use_bias=use_bias, data_format=data_format, name="dw_conv") self.pw_conv = mask_conv1d1( in_channels=in_channels, out_channels=out_channels, groups=groups, use_bias=use_bias, data_format=data_format, name="pw_conv") if self.use_channel_shuffle: self.shuffle = ChannelShuffle1d( channels=out_channels, groups=groups, data_format=data_format, name="shuffle") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") if self.use_dropout: self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") def call(self, x, x_len, training=None): x, x_len = self.dw_conv(x, x_len) x, x_len = self.pw_conv(x, x_len) if self.use_channel_shuffle: x = self.shuffle(x) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) if self.use_dropout: x = self.dropout(x, training=training) return x, x_len class JasperUnit(nn.Layer): """ Jasper unit with residual connection. Parameters: ---------- in_channels : int or list of int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. bn_eps : float Small float added to variance in Batch norm. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. 
use_dr : bool Whether to use dense residual scheme. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, bn_eps, dropout_rate, repeat, use_dw, use_dr, data_format="channels_last", **kwargs): super(JasperUnit, self).__init__(**kwargs) self.use_dropout = (dropout_rate != 0.0) self.use_dr = use_dr block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d if self.use_dr: self.identity_block = DualPathParallelConcurent(name="identity_block") for i, dense_in_channels_i in enumerate(in_channels): self.identity_block.add(mask_conv1d1_block( in_channels=dense_in_channels_i, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None, data_format=data_format, name="block{}".format(i + 1))) in_channels = in_channels[-1] else: self.identity_block = mask_conv1d1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, dropout_rate=0.0, activation=None, data_format=data_format, name="identity_block") self.body = DualPathSequential(name="body") for i in range(repeat): activation = "relu" if i < repeat - 1 else None dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0 self.body.add(block_class( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=1, padding=(kernel_size // 2), bn_eps=bn_eps, dropout_rate=dropout_rate_i, activation=activation, data_format=data_format, name="block{}".format(i + 1))) in_channels = out_channels self.activ = nn.ReLU() if self.use_dropout: self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") def call(self, x, x_len, training=None): if self.use_dr: x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None) y = [x] if y is None else y + [x] y_len = [x_len] if y_len is None else y_len + [x_len] identity, _ = self.identity_block(y, y_len, training=training) identity = tf.stack(identity, axis=1) identity = tf.math.reduce_sum(identity, axis=1) else: identity, _ = 
self.identity_block(x, x_len, training=training) x, x_len = self.body(x, x_len, training=training) x = x + identity x = self.activ(x) if self.use_dropout: x = self.dropout(x, training=training) if self.use_dr: return x, (x_len, y, y_len) else: return x, x_len class JasperFinalBlock(nn.Layer): """ Jasper specific final block. Parameters: ---------- in_channels : int Number of input channels. channels : list of int Number of output channels for each block. kernel_sizes : list of int Kernel sizes for each block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each block. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, channels, kernel_sizes, bn_eps, dropout_rates, use_dw, use_dr, data_format="channels_last", **kwargs): super(JasperFinalBlock, self).__init__(**kwargs) self.use_dr = use_dr conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d self.conv1 = conv1_class( in_channels=in_channels, out_channels=channels[-2], kernel_size=kernel_sizes[-2], strides=1, padding=(2 * kernel_sizes[-2] // 2 - 1), dilation=2, bn_eps=bn_eps, dropout_rate=dropout_rates[-2], data_format=data_format, name="conv1") self.conv2 = MaskConvBlock1d( in_channels=channels[-2], out_channels=channels[-1], kernel_size=kernel_sizes[-1], strides=1, padding=(kernel_sizes[-1] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[-1], data_format=data_format, name="conv2") def call(self, x, x_len, training=None): if self.use_dr: x_len = x_len[0] x, x_len = self.conv1(x, x_len, training=training) x, x_len = self.conv2(x, x_len, training=training) return x, x_len class Jasper(tf.keras.Model): """ Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. 
Parameters: ---------- channels : list of int Number of output channels for each unit and initial/final block. kernel_sizes : list of int Kernel sizes for each unit and initial/final block. bn_eps : float Small float added to variance in Batch norm. dropout_rates : list of int Dropout rates for each unit and initial/final block. repeat : int Count of body convolution blocks. use_dw : bool Whether to use depthwise block. use_dr : bool Whether to use dense residual scheme. from_audio : bool, default True Whether to treat input as audio instead of Mel-specs. dither : float, default 0.0 Amount of white-noise dithering. return_text : bool, default False Whether to return text instead of logits. vocabulary : list of str or None, default None Vocabulary of the dataset. in_channels : int, default 64 Number of input channels (audio features). classes : int, default 29 Number of classification classes (number of graphemes). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, kernel_sizes, bn_eps, dropout_rates, repeat, use_dw, use_dr, from_audio=True, dither=0.0, return_text=False, vocabulary=None, in_channels=64, classes=29, data_format="channels_last", **kwargs): super(Jasper, self).__init__(**kwargs) self.in_size = in_channels self.in_channels = in_channels self.classes = classes self.vocabulary = vocabulary self.data_format = data_format self.from_audio = from_audio self.return_text = return_text if self.from_audio: self.preprocessor = NemoMelSpecExtractor( dither=dither, data_format=data_format, name="preprocessor") self.features = DualPathSequential(name="features") init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d self.features.add(init_block_class( in_channels=in_channels, out_channels=channels[0], kernel_size=kernel_sizes[0], strides=2, padding=(kernel_sizes[0] // 2), bn_eps=bn_eps, dropout_rate=dropout_rates[0], data_format=data_format, name="init_block")) in_channels = channels[0] in_channels_list = [] for i, (out_channels, kernel_size, dropout_rate) in \ enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])): in_channels_list += [in_channels] self.features.add(JasperUnit( in_channels=(in_channels_list if use_dr else in_channels), out_channels=out_channels, kernel_size=kernel_size, bn_eps=bn_eps, dropout_rate=dropout_rate, repeat=repeat, use_dw=use_dw, use_dr=use_dr, data_format=data_format, name="unit{}".format(i + 1))) in_channels = out_channels self.features.add(JasperFinalBlock( in_channels=in_channels, channels=channels, kernel_sizes=kernel_sizes, bn_eps=bn_eps, dropout_rates=dropout_rates, use_dw=use_dw, use_dr=use_dr, data_format=data_format, name="final_block")) in_channels = channels[-1] self.output1 = conv1d1( in_channels=in_channels, out_channels=classes, use_bias=True, data_format=data_format, name="output1") if self.return_text: self.ctc_decoder = CtcDecoder(vocabulary=vocabulary) def call(self, x, x_len=None, training=None): if x_len is None: 
assert (type(x) in (list, tuple)) x, x_len = x if self.from_audio: x, x_len = self.preprocessor(x, training=training) x, x_len = self.features(x, x_len, training=training) x = self.output1(x) if self.return_text: greedy_predictions = x.swapaxes(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).asnumpy() return self.ctc_decoder(greedy_predictions) else: return x, x_len def get_jasper(version, use_dw=False, use_dr=False, bn_eps=1e-3, vocabulary=None, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Jasper/DR/QuartzNet model with specific parameters. Parameters: ---------- version : tuple of str Model type and configuration. use_dw : bool, default False Whether to use depthwise block. use_dr : bool, default False Whether to use dense residual scheme. bn_eps : float, default 1e-3 Small float added to variance in Batch norm. vocabulary : list of str or None, default None Vocabulary of the dataset. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" import numpy as np blocks, repeat = tuple(map(int, version[1].split("x"))) main_stage_repeat = blocks // 5 model_type = version[0] if model_type == "jasper": channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024] kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1] dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4] elif model_type == "quartznet": channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024] kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1] dropout_rates_per_stage = [0.0] * 8 else: raise ValueError("Unsupported Jasper family model type: {}".format(model_type)) stage_repeat = np.full((8,), 1) stage_repeat[1:-2] *= main_stage_repeat channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], []) kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], []) dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], []) net = Jasper( channels=channels, kernel_sizes=kernel_sizes, bn_eps=bn_eps, dropout_rates=dropout_rates, repeat=repeat, use_dw=use_dw, use_dr=use_dr, vocabulary=vocabulary, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file seq_len = 100 x_shape = (1, seq_len * 640) if net.from_audio else ( (1, net.in_size, seq_len) if is_channels_first(net.data_format) else (1, seq_len, net.in_size)) x = tf.random.normal(x_shape) x_len = tf.convert_to_tensor(np.array([seq_len], np.long)) net(x, x_len) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def jasper5x3(**kwargs): """ Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "5x3"), model_name="jasper5x3", **kwargs) def jasper10x4(**kwargs): """ Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs) def jasper10x5(**kwargs): """ Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_jasper(version=("jasper", "10x5"), model_name="jasper10x5", **kwargs) def _test(): import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False from_audio = True # from_audio = False audio_features = 64 classes = 29 models = [ jasper5x3, jasper10x4, jasper10x5, ] for model in models: net = model( in_channels=audio_features, classes=classes, from_audio=from_audio, pretrained=pretrained, data_format=data_format) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else ( (batch, audio_features, seq_len_max) if is_channels_first(data_format) else (batch, seq_len_max, audio_features)) x = tf.random.normal(shape=x_shape) x_len = tf.convert_to_tensor(seq_len.astype(np.long)) y, y_len = net(x, x_len) assert (y.shape.as_list()[0] == batch) classes_id = 1 if is_channels_first(data_format) else 2 seq_id = 2 if is_channels_first(data_format) else 1 assert 
(y.shape.as_list()[classes_id] == net.classes) if from_audio: assert (y.shape.as_list()[seq_id] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.shape.as_list()[seq_id] in [seq_len_max // 2, seq_len_max // 2 + 1]) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != jasper5x3 or weight_count == 107681053) assert (model != jasper10x4 or weight_count == 261393693) assert (model != jasper10x5 or weight_count == 322286877) if __name__ == "__main__": _test()
39,745
32.176962
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/resneta.py
""" ResNet(A) with average downsampling for ImageNet-1K, implemented in TensorFlow. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, AvgPool2d, SimpleSequential, is_channels_first from .resnet import ResBlock, ResBottleneck from .senet import SEInitBlock class ResADownBlock(nn.Layer): """ ResNet(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, dilation=1, data_format="channels_last", **kwargs): super(ResADownBlock, self).__init__(**kwargs) self.pool = AvgPool2d( pool_size=(strides if dilation == 1 else 1), strides=(strides if dilation == 1 else 1), ceil_mode=True, data_format=data_format, name="pool") self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): x = self.pool(x) x = self.conv(x, training=training) return x class ResAUnit(nn.Layer): """ ResNet(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. 
dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, padding=1, dilation=1, bottleneck=True, conv1_stride=False, data_format="channels_last", **kwargs): super(ResAUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, padding=padding, dilation=dilation, conv1_stride=conv1_stride, data_format=data_format, name="body") else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") if self.resize_identity: self.identity_block = ResADownBlock( in_channels=in_channels, out_channels=out_channels, strides=strides, dilation=dilation, data_format=data_format, name="identity_block") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_block(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class ResNetA(tf.keras.Model): """ ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. 
dilated : bool, default False Whether to use dilation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, dilated=False, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(ResNetA, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(SEInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if dilated: strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1 dilation = (2 ** max(0, i - 1 - int(j == 0))) else: strides = 2 if (j == 0) and (i != 0) else 1 dilation = 1 stage.add(ResAUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, padding=dilation, dilation=dilation, bottleneck=bottleneck, conv1_stride=conv1_stride, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.GlobalAvgPool2D( data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x) return x def get_resneta(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ResNet(A) with average downsampling model with specific 
parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = 
ResNetA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def resneta10(**kwargs): """ ResNet(A)-10 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resneta(blocks=10, model_name="resneta10", **kwargs) def resnetabc14b(**kwargs): """ ResNet(A)-BC-14b with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs) def resneta18(**kwargs): """ ResNet(A)-18 with average downsampling model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resneta(blocks=18, model_name="resneta18", **kwargs) def resneta50b(**kwargs): """ ResNet(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs) def resneta101b(**kwargs): """ ResNet(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs) def resneta152b(**kwargs): """ ResNet(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ resneta10, resnetabc14b, resneta18, resneta50b, resneta101b, resneta152b, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resneta10 or weight_count == 5438024) assert (model != resnetabc14b or weight_count == 10084168) assert (model != resneta18 or weight_count == 11708744) assert (model != resneta50b or weight_count == 25576264) assert (model != resneta101b or weight_count == 44568392) assert (model != resneta152b or weight_count == 60212040) if __name__ == "__main__": _test()
15,634
33.667406
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/resnesta.py
""" ResNeSt(A) with average downsampling for ImageNet-1K, implemented in TensorFlow. Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. """ __all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnesta50', 'resnesta101', 'resnesta152', 'resnesta200', 'resnesta269', 'ResNeStADownBlock'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, saconv3x3_block, AvgPool2d, SimpleSequential, is_channels_first from .senet import SEInitBlock class ResNeStABlock(nn.Layer): """ Simple ResNeSt(A) block for residual path in ResNeSt(A) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, use_bias=False, use_bn=True, data_format="channels_last", **kwargs): super(ResNeStABlock, self).__init__(**kwargs) self.resize = (strides > 1) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv1") if self.resize: self.pool = AvgPool2d( pool_size=3, strides=strides, padding=1, data_format=data_format, name="pool") self.conv2 = saconv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) if self.resize: x = self.pool(x) x = self.conv2(x, training=training) return x class ResNeStABottleneck(nn.Layer): """ ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, bottleneck_factor=4, data_format="channels_last", **kwargs): super(ResNeStABottleneck, self).__init__(**kwargs) self.resize = (strides > 1) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = saconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="conv2") if self.resize: self.pool = AvgPool2d( pool_size=3, strides=strides, padding=1, data_format=data_format, name="pool") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) if self.resize: x = self.pool(x) x = self.conv3(x, training=training) return x class ResNeStADownBlock(nn.Layer): """ ResNeSt(A) downsample block for the identity branch of a residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, data_format="channels_last", **kwargs): super(ResNeStADownBlock, self).__init__(**kwargs) self.pool = AvgPool2d( pool_size=strides, strides=strides, ceil_mode=True, data_format=data_format, name="pool") self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): x = self.pool(x) x = self.conv(x, training=training) return x class ResNeStAUnit(nn.Layer): """ ResNeSt(A) unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, bottleneck=True, data_format="channels_last", **kwargs): super(ResNeStAUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) if bottleneck: self.body = ResNeStABottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") else: self.body = ResNeStABlock( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") if self.resize_identity: self.identity_block = ResNeStADownBlock( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="identity_block") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_block(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class ResNeStA(tf.keras.Model): """ ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,' 
https://arxiv.org/abs/2004.08955. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(ResNeStA, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(SEInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(ResNeStAUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.GlobalAvgPool2D( data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") if dropout_rate > 0.0: self.output1.add(nn.Dropout( rate=dropout_rate, name="output1/dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="output1/fc")) def call(self, x, training=None): x = self.features(x, training=training) x = 
self.output1(x) return x def get_resnesta(blocks, bottleneck=None, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ResNeSt(A) with average downsampling model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if blocks >= 101: init_block_channels *= 2 if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: 
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNeStA( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def resnestabc14(**kwargs): """ ResNeSt(A)-BC-14 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=14, bottleneck=True, model_name="resnestabc14", **kwargs) def resnesta18(**kwargs): """ ResNeSt(A)-18 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=18, model_name="resnesta18", **kwargs) def resnestabc26(**kwargs): """ ResNeSt(A)-BC-26 with average downsampling model from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=26, bottleneck=True, model_name="resnestabc26", **kwargs) def resnesta50(**kwargs): """ ResNeSt(A)-50 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=50, model_name="resnesta50", **kwargs) def resnesta101(**kwargs): """ ResNeSt(A)-101 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=101, model_name="resnesta101", **kwargs) def resnesta152(**kwargs): """ ResNeSt(A)-152 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=152, model_name="resnesta152", **kwargs) def resnesta200(in_size=(256, 256), **kwargs): """ ResNeSt(A)-200 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=200, in_size=in_size, dropout_rate=0.2, model_name="resnesta200", **kwargs) def resnesta269(in_size=(320, 320), **kwargs): """ ResNeSt(A)-269 with average downsampling model with stride at the second convolution in bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_size : tuple of two ints, default (320, 320) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnesta(blocks=269, in_size=in_size, dropout_rate=0.2, model_name="resnesta269", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ (resnestabc14, 224), (resnesta18, 224), (resnestabc26, 224), (resnesta50, 224), (resnesta101, 224), (resnesta152, 224), (resnesta200, 256), (resnesta269, 320), ] for model, size in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, size, size) if is_channels_first(data_format) else (batch, size, size, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnestabc14 or weight_count == 10611688) assert (model != resnesta18 or weight_count == 12763784) assert (model != resnestabc26 or weight_count == 17069448) assert (model != resnesta50 or weight_count == 27483240) assert (model != resnesta101 or weight_count == 48275016) assert (model != resnesta152 or weight_count == 65316040) assert (model != 
resnesta200 or weight_count == 70201544) assert (model != resnesta269 or weight_count == 110929480) if __name__ == "__main__": _test()
19,800
32.561017
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/senet.py
""" SENet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock'] import os import math import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, SEBlock, MaxPool2d, SimpleSequential, flatten class SENetBottleneck(nn.Layer): """ SENet bottleneck block for residual path in SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, cardinality, bottleneck_width, data_format="channels_last", **kwargs): super(SENetBottleneck, self).__init__(**kwargs) mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D group_width2 = group_width // 2 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width2, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=group_width2, out_channels=group_width, strides=strides, groups=cardinality, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class SENetUnit(nn.Layer): """ SENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. 
cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. identity_conv3x3 : bool, default False Whether to use 3x3 convolution in the identity link. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, cardinality, bottleneck_width, identity_conv3x3, data_format="channels_last", **kwargs): super(SENetUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = SENetBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, data_format=data_format, name="body") self.se = SEBlock( channels=out_channels, data_format=data_format, name="se") if self.resize_identity: if identity_conv3x3: self.identity_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") else: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = self.se(x) x = x + identity x = self.activ(x) return x class SEInitBlock(nn.Layer): """ SENet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(SEInitBlock, self).__init__(**kwargs) mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, strides=2, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, data_format=data_format, name="conv3") self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) x = self.pool(x) return x class SENet(tf.keras.Model): """ SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(SENet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(SEInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) identity_conv3x3 = (i != 0) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(SENetUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, identity_conv3x3=identity_conv3x3, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") self.output1.add(nn.Dropout( rate=0.2, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="fc")) def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_senet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create SENet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 16: layers = [1, 1, 1, 1] cardinality = 32 elif blocks == 28: layers = [2, 2, 2, 2] cardinality = 32 elif blocks == 40: layers = [3, 3, 3, 3] cardinality = 32 elif blocks == 52: layers = [3, 4, 6, 3] cardinality = 32 elif blocks == 103: layers = [3, 4, 23, 3] cardinality = 32 elif blocks == 154: layers = [3, 8, 36, 3] cardinality = 64 else: raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks)) bottleneck_width = 4 init_block_channels = 128 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SENet( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def senet16(**kwargs): """ SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_senet(blocks=16, model_name="senet16", **kwargs) def senet28(**kwargs): """ SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_senet(blocks=28, model_name="senet28", **kwargs) def senet40(**kwargs): """ SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_senet(blocks=40, model_name="senet40", **kwargs) def senet52(**kwargs): """ SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_senet(blocks=52, model_name="senet52", **kwargs) def senet103(**kwargs): """ SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_senet(blocks=103, model_name="senet103", **kwargs) def senet154(**kwargs): """ SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_senet(blocks=154, model_name="senet154", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ senet16, senet28, senet40, senet52, senet103, senet154, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != senet16 or weight_count == 31366168) assert (model != senet28 or weight_count == 36453768) assert (model != senet40 or weight_count == 41541368) assert (model != senet52 or weight_count == 44659416) assert (model != senet103 or weight_count == 60963096) assert (model != senet154 or weight_count == 115088984) if __name__ == "__main__": _test()
15,060
30.574423
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/simplepose_coco.py
""" SimplePose for COCO Keypoint, implemented in TensorFlow. Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. """ __all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco', 'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco', 'simplepose_resneta152b_coco'] import os import tensorflow as tf from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock, SimpleSequential, is_channels_first from .resnet import resnet18, resnet50b, resnet101b, resnet152b from .resneta import resneta50b, resneta101b, resneta152b class SimplePose(tf.keras.Model): """ SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17, data_format="channels_last", **kwargs): super(SimplePose, self).__init__(**kwargs) assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap self.data_format = data_format self.backbone = backbone self.backbone._name = "backbone" self.decoder = SimpleSequential(name="decoder") in_channels = backbone_out_channels for i, out_channels in enumerate(channels): self.decoder.add(DeconvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=4, strides=2, padding=1, data_format=data_format, name="unit{}".format(i + 1))) in_channels = out_channels self.decoder.add(conv1x1( in_channels=in_channels, out_channels=keypoints, use_bias=True, data_format=data_format, name="final_block")) self.heatmap_max_det = HeatmapMaxDetBlock( data_format=data_format, name="heatmap_max_det") def call(self, x, training=None): x = self.backbone(x, training=training) heatmap = self.decoder(x, training=training) if self.return_heatmap or not tf.executing_eagerly(): return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_simplepose(backbone, backbone_out_channels, keypoints, model_name=None, data_format="channels_last", pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create SimplePose model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" channels = [256, 256, 256] net = SimplePose( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, keypoints=keypoints, data_format=data_format, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=512, keypoints=keypoints, model_name="simplepose_resnet18_coco", data_format=data_format, **kwargs) def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet50b_coco", data_format=data_format, **kwargs) def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet101b_coco", data_format=data_format, **kwargs) def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet152b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resnet152b_coco", data_format=data_format, **kwargs) def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet(A)-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resneta50b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta50b_coco", data_format=data_format, **kwargs) def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet(A)-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resneta101b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta101b_coco", data_format=data_format, **kwargs) def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs): """ SimplePose model on the base of ResNet(A)-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" backbone = resneta152b(pretrained=pretrained_backbone, data_format=data_format).features del backbone.children[-1] return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_resneta152b_coco", data_format=data_format, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ simplepose_resnet18_coco, simplepose_resnet50b_coco, simplepose_resnet101b_coco, simplepose_resnet152b_coco, simplepose_resneta50b_coco, simplepose_resneta101b_coco, simplepose_resneta152b_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (y.shape[0] == batch) if return_heatmap: if is_channels_first(data_format): assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert ((y.shape[3] == keypoints) and (y.shape[1] == x.shape[1] // 4) and (y.shape[2] == x.shape[2] // 4)) else: assert ((y.shape[1] == keypoints) and (y.shape[2] == 3)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != simplepose_resnet18_coco or weight_count == 15376721) assert (model != simplepose_resnet50b_coco or weight_count == 33999697) assert (model != simplepose_resnet101b_coco or weight_count == 52991825) assert (model != simplepose_resnet152b_coco or weight_count == 68635473) assert (model != simplepose_resneta50b_coco or weight_count == 34018929) assert (model != simplepose_resneta101b_coco or weight_count == 53011057) assert (model != simplepose_resneta152b_coco or weight_count == 68654705) 
if __name__ == "__main__": _test()
15,180
40.252717
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/vovnet.py
""" VoVNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. """ __all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, SequentialConcurrent, MaxPool2d, SimpleSequential, flatten,\ is_channels_first class VoVUnit(nn.Layer): """ VoVNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. branch_channels : int Number of output channels for each branch. num_branches : int Number of branches. resize : bool Whether to use resize block. use_residual : bool Whether to use residual block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, branch_channels, num_branches, resize, use_residual, data_format="channels_last", **kwargs): super(VoVUnit, self).__init__(**kwargs) self.resize = resize self.use_residual = use_residual if self.resize: self.pool = MaxPool2d( pool_size=3, strides=2, ceil_mode=True, data_format=data_format, name="pool") self.branches = SequentialConcurrent( data_format=data_format, name="branches") branch_in_channels = in_channels for i in range(num_branches): self.branches.add(conv3x3_block( in_channels=branch_in_channels, out_channels=branch_channels, data_format=data_format, name="branch{}".format(i + 1))) branch_in_channels = branch_channels self.concat_conv = conv1x1_block( in_channels=(in_channels + num_branches * branch_channels), out_channels=out_channels, data_format=data_format, name="concat_conv") def call(self, x, training=None): if self.resize: x = self.pool(x) if self.use_residual: identity = x x = self.branches(x, training=training) x = self.concat_conv(x, training=training) if self.use_residual: x = x + identity return x class 
VoVInitBlock(nn.Layer): """ VoVNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(VoVInitBlock, self).__init__(**kwargs) mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, strides=2, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, strides=2, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class VoVNet(tf.keras.Model): """ VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- channels : list of list of int Number of output channels for each unit. branch_channels : list of list of int Number of branch output channels for each unit. num_branches : int Number of branches for the each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, branch_channels, num_branches, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(VoVNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format init_block_channels = 128 self.features = SimpleSequential(name="features") self.features.add(VoVInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): use_residual = (j != 0) resize = (j == 0) and (i != 0) stage.add(VoVUnit( in_channels=in_channels, out_channels=out_channels, branch_channels=branch_channels[i][j], num_branches=num_branches, resize=resize, use_residual=use_residual, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_vovnet(blocks, slim=False, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. slim : bool, default False Whether to use a slim model. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 27: layers = [1, 1, 1, 1] elif blocks == 39: layers = [1, 1, 2, 2] elif blocks == 57: layers = [1, 1, 4, 3] else: raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 6 + 3 == blocks) num_branches = 5 channels_per_layers = [256, 512, 768, 1024] branch_channels_per_layers = [128, 160, 192, 224] if slim: channels_per_layers = [ci // 2 for ci in channels_per_layers] branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)] net = VoVNet( channels=channels, branch_channels=branch_channels, num_branches=num_branches, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def vovnet27s(**kwargs): """ VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_vovnet(blocks=27, slim=True, model_name="vovnet27s", **kwargs) def vovnet39(**kwargs): """ VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_vovnet(blocks=39, model_name="vovnet39", **kwargs) def vovnet57(**kwargs): """ VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,' https://arxiv.org/abs/1904.09730. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_vovnet(blocks=57, model_name="vovnet57", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ vovnet27s, vovnet39, vovnet57, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != vovnet27s or weight_count == 3525736) assert (model != vovnet39 or weight_count == 22600296) assert (model != vovnet57 or weight_count == 36640296) if __name__ == "__main__": _test()
11,511
31.519774
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/espnetv2.py
""" ESPNetv2 for ImageNet-1K, implemented in TensorFlow. Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. NB: not ready. """ __all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2'] import os import math import tensorflow as tf import tensorflow.keras.layers as nn from .common import BatchNorm, PReLU2, conv3x3, conv1x1_block, conv3x3_block, AvgPool2d, SimpleSequential,\ DualPathSequential, flatten, is_channels_first, get_channel_axis class PreActivation(nn.Layer): """ PreResNet like pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, data_format="channels_last", **kwargs): super(PreActivation, self).__init__(**kwargs) assert (in_channels is not None) self.bn = BatchNorm( data_format=data_format, name="bn") self.activ = PReLU2(in_channels=in_channels, name="activ") def call(self, x, training=None): x = self.bn(x, training=training) x = self.activ(x) return x class ShortcutBlock(nn.Layer): """ ESPNetv2 shortcut block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(ShortcutBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, activation=(lambda: PReLU2(in_channels=in_channels, name="activ")), data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class HierarchicalConcurrent(SimpleSequential): """ A container for hierarchical concatenation of blocks with parameters. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(HierarchicalConcurrent, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) def call(self, x, training=None): out = [] y_prev = None for block in self.children: y = block(x, training=training) print(y.shape) if y_prev is not None: y = y + y_prev out.append(y) y_prev = y out = tf.concat(out, axis=self.axis) return out class ESPBlock(nn.Layer): """ ESPNetv2 block (so-called EESP block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the branch convolution layers. dilations : list of int Dilation values for branches. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, dilations, data_format="channels_last", **kwargs): super(ESPBlock, self).__init__(**kwargs) num_branches = len(dilations) assert (out_channels % num_branches == 0) self.downsample = (strides != 1) mid_channels = out_channels // num_branches # dilations = [1] * len(dilations) self.reduce_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, groups=num_branches, activation=(lambda: PReLU2(in_channels=mid_channels, name="activ")), data_format=data_format, name="reduce_conv") self.branches = HierarchicalConcurrent( data_format=data_format, name="branches") for i in range(num_branches): self.branches.add(conv3x3( in_channels=mid_channels, out_channels=mid_channels, strides=strides, padding=dilations[i], dilation=dilations[i], groups=mid_channels, data_format=data_format, name="branch{}".format(i + 1))) self.merge_conv = conv1x1_block( in_channels=out_channels, out_channels=out_channels, groups=num_branches, activation=None, data_format=data_format, name="merge_conv") self.preactiv = PreActivation( in_channels=out_channels, data_format=data_format, name="preactiv") if not self.downsample: self.activ = PReLU2(in_channels=out_channels, name="activ") def call(self, x, x0, training=None): y = self.reduce_conv(x, training=training) y = self.branches(y, training=training) y = self.preactiv(y, training=training) y = self.merge_conv(y, training=training) if not self.downsample: y = y + x y = self.activ(y) return y, x0 class DownsampleBlock(nn.Layer): """ ESPNetv2 downsample block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. x0_channels : int Number of input channels for shortcut. dilations : list of int Dilation values for branches in EESP block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, x0_channels, dilations, data_format="channels_last", **kwargs): super(DownsampleBlock, self).__init__(**kwargs) self.data_format = data_format inc_channels = out_channels - in_channels # dilations = [1] * len(dilations) self.pool = AvgPool2d( pool_size=3, strides=2, padding=1, ceil_mode=True, data_format=data_format, name="pool") self.eesp = ESPBlock( in_channels=in_channels, out_channels=inc_channels, strides=2, dilations=dilations, data_format=data_format, name="eesp") self.shortcut_block = ShortcutBlock( in_channels=x0_channels, out_channels=out_channels, data_format=data_format, name="shortcut_block") self.activ = PReLU2(in_channels=out_channels, name="activ") def call(self, x, x0, training=None): y1 = self.pool(x) y2, _ = self.eesp(x, None, training=training) x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format)) x0 = self.pool(x0) y3 = self.shortcut_block(x0, training=training) x = x + y3 x = self.activ(x) return x, x0 class ESPInitBlock(nn.Layer): """ ESPNetv2 initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(ESPInitBlock, self).__init__(**kwargs) self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, activation=(lambda: PReLU2(in_channels=out_channels, name="activ")), data_format=data_format, name="conv") self.pool = AvgPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, x0, training=None): x = self.conv(x, training=training) x0 = self.pool(x0) return x, x0 class ESPFinalBlock(nn.Layer): """ ESPNetv2 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
final_groups : int Number of groups in the last convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, final_groups, data_format="channels_last", **kwargs): super(ESPFinalBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, groups=in_channels, activation=(lambda: PReLU2(in_channels=in_channels, name="activ")), data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=in_channels, out_channels=out_channels, groups=final_groups, activation=(lambda: PReLU2(in_channels=out_channels, name="activ")), data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class ESPNetv2(tf.keras.Model): """ ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. final_block_groups : int Number of groups for the final unit. dilations : list of list of list of int Dilation values for branches in each unit. dropout_rate : float, default 0.2 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, final_block_channels, final_block_groups, dilations, dropout_rate=0.2, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(ESPNetv2, self).__init__(**kwargs) self.in_size = in_size self.classes = classes x0_channels = in_channels self.features = DualPathSequential( return_two=False, first_ordinals=0, last_ordinals=2, name="features") self.features.add(ESPInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(name="stage{}_".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if j == 0: unit = DownsampleBlock( in_channels=in_channels, out_channels=out_channels, x0_channels=x0_channels, dilations=dilations[i][j], data_format=data_format, name="unit{}".format(j + 1)) else: unit = ESPBlock( in_channels=in_channels, out_channels=out_channels, strides=1, dilations=dilations[i][j], data_format=data_format, name="unit{}".format(j + 1)) stage.add(unit) in_channels = out_channels self.features.add(stage) self.features.add(ESPFinalBlock( in_channels=in_channels, out_channels=final_block_channels, final_groups=final_block_groups, data_format=data_format, name="final_block")) in_channels = final_block_channels self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") self.output1.add(nn.Dropout( rate=dropout_rate, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="fc")) def call(self, x, training=None): x = self.features(x, x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_espnetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ESPNetv2 model with 
specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ assert (width_scale <= 2.0) branches = 4 layers = [1, 4, 8, 4] max_dilation_list = [6, 5, 4, 3, 2] max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1) for (i, li) in enumerate(layers)] dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations] base_channels = 32 weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))] init_block_channels = base_channels if weighed_base_channels > base_channels else weighed_base_channels final_block_channels = 1024 if width_scale <= 1.5 else 1280 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = ESPNetv2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, final_block_groups=branches, dilations=dilations, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def espnetv2_wd2(**kwargs): """ ESPNetv2 x0.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural 
Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=0.5, model_name="espnetv2_wd2", **kwargs) def espnetv2_w1(**kwargs): """ ESPNetv2 x1.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.0, model_name="espnetv2_w1", **kwargs) def espnetv2_w5d4(**kwargs): """ ESPNetv2 x1.25 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.25, model_name="espnetv2_w5d4", **kwargs) def espnetv2_w3d2(**kwargs): """ ESPNetv2 x1.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs) def espnetv2_w2(**kwargs): """ ESPNetv2 x2.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,' https://arxiv.org/abs/1811.11431. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ espnetv2_wd2, espnetv2_w1, espnetv2_w5d4, espnetv2_w3d2, espnetv2_w2, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != espnetv2_wd2 or weight_count == 1241092) assert (model != espnetv2_w1 or weight_count == 1669592) assert (model != espnetv2_w5d4 or weight_count == 1964832) assert (model != espnetv2_w3d2 or weight_count == 2314120) assert (model != espnetv2_w2 or weight_count == 3497144) if __name__ == "__main__": _test()
20,454
32.260163
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/shufflenet.py
""" ShuffleNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. """ __all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1', 'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2', 'shufflenet_g1_wd4', 'shufflenet_g3_wd4'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle, BatchNorm, MaxPool2d, AvgPool2d,\ SimpleSequential, get_channel_axis, flatten class ShuffleUnit(nn.Layer): """ ShuffleNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, groups, downsample, ignore_group, data_format="channels_last", **kwargs): super(ShuffleUnit, self).__init__(**kwargs) self.data_format = data_format self.downsample = downsample mid_channels = out_channels // 4 if downsample: out_channels -= in_channels self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups), data_format=data_format, name="compress_conv1") self.compress_bn1 = BatchNorm( # in_channels=mid_channels, data_format=data_format, name="compress_bn1") self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups, data_format=data_format, name="c_shuffle") self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, strides=(2 if self.downsample else 1), data_format=data_format, name="dw_conv2") self.dw_bn2 = BatchNorm( # in_channels=mid_channels, data_format=data_format, name="dw_bn2") self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups, data_format=data_format, name="expand_conv3") self.expand_bn3 = BatchNorm( # in_channels=out_channels, data_format=data_format, name="expand_bn3") if downsample: self.avgpool = AvgPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="avgpool") self.activ = nn.ReLU() def call(self, x, training=None): identity = x x = self.compress_conv1(x) x = self.compress_bn1(x, training=training) x = self.activ(x) x = self.c_shuffle(x) x = self.dw_conv2(x) x = self.dw_bn2(x, training=training) x = self.expand_conv3(x) x = self.expand_bn3(x, training=training) if self.downsample: identity = self.avgpool(identity) x = tf.concat([x, identity], axis=get_channel_axis(self.data_format)) else: x = x + identity x = self.activ(x) return x class ShuffleInitBlock(nn.Layer): """ ShuffleNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(ShuffleInitBlock, self).__init__(**kwargs) self.conv = conv3x3( in_channels=in_channels, out_channels=out_channels, strides=2, data_format=data_format, name="conv") self.bn = BatchNorm( # in_channels=out_channels, data_format=data_format, name="bn") self.activ = nn.ReLU() self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x) x = self.bn(x, training=training) x = self.activ(x) x = self.pool(x) return x class ShuffleNet(tf.keras.Model): """ ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(ShuffleNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) ignore_group = (i == 0) and (j == 0) stage.add(ShuffleUnit( in_channels=in_channels, out_channels=out_channels, groups=groups, downsample=downsample, ignore_group=ignore_group, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x) x = flatten(x, self.data_format) return x def get_shufflenet(groups, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ShuffleNet model with specific parameters. Parameters: ---------- groups : int Number of groups in convolution layers. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" init_block_channels = 24 layers = [4, 8, 4] if groups == 1: channels_per_layers = [144, 288, 576] elif groups == 2: channels_per_layers = [200, 400, 800] elif groups == 3: channels_per_layers = [240, 480, 960] elif groups == 4: channels_per_layers = [272, 544, 1088] elif groups == 8: channels_per_layers = [384, 768, 1536] else: raise ValueError("The {} of groups is not supported".format(groups)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) net = ShuffleNet( channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def shufflenet_g1_w1(**kwargs): """ ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs) def shufflenet_g2_w1(**kwargs): """ ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs) def shufflenet_g3_w1(**kwargs): """ ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs) def shufflenet_g4_w1(**kwargs): """ ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs) def shufflenet_g8_w1(**kwargs): """ ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs) def shufflenet_g1_w3d4(**kwargs): """ ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs) def shufflenet_g3_w3d4(**kwargs): """ ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs) def shufflenet_g1_wd2(**kwargs): """ ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs) def shufflenet_g3_wd2(**kwargs): """ ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs) def shufflenet_g1_wd4(**kwargs): """ ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs) def shufflenet_g3_wd4(**kwargs): """ ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ shufflenet_g1_w1, shufflenet_g2_w1, shufflenet_g3_w1, shufflenet_g4_w1, shufflenet_g8_w1, shufflenet_g1_w3d4, shufflenet_g3_w3d4, shufflenet_g1_wd2, shufflenet_g3_wd2, shufflenet_g1_wd4, shufflenet_g3_wd4, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenet_g1_w1 or weight_count == 1531936) assert (model != shufflenet_g2_w1 or weight_count == 1733848) assert (model != shufflenet_g3_w1 or weight_count == 1865728) assert (model != shufflenet_g4_w1 or weight_count == 1968344) assert (model != shufflenet_g8_w1 or weight_count == 2434768) assert (model != shufflenet_g1_w3d4 or weight_count == 975214) assert (model != shufflenet_g3_w3d4 or weight_count == 1238266) assert (model != shufflenet_g1_wd2 or weight_count == 534484) assert (model != shufflenet_g3_wd2 or weight_count == 718324) assert (model != shufflenet_g1_wd4 or weight_count == 
209746) assert (model != shufflenet_g3_wd4 or weight_count == 305902) if __name__ == "__main__": _test()
17,521
33.089494
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/bamresnet.py
""" BAM-ResNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. """ __all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, BatchNorm, SimpleSequential, flatten,\ is_channels_first from .resnet import ResInitBlock, ResUnit class DenseBlock(nn.Layer): """ Standard dense block with Batch normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input features. out_channels : int Number of output features. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(DenseBlock, self).__init__(**kwargs) self.fc = nn.Dense( units=out_channels, input_dim=in_channels, name="fc") self.bn = BatchNorm( data_format=data_format, name="bn") self.activ = nn.ReLU() def call(self, x, training=None): x = self.fc(x) x = self.bn(x, training=training) x = self.activ(x) return x class ChannelGate(nn.Layer): """ BAM channel gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_layers : int, default 1 Number of dense blocks. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, reduction_ratio=16, num_layers=1, data_format="channels_last", **kwargs): super(ChannelGate, self).__init__(**kwargs) self.data_format = data_format mid_channels = channels // reduction_ratio self.pool = nn.GlobalAvgPool2D( data_format=data_format, name="pool") self.flatten = nn.Flatten() self.init_fc = DenseBlock( in_channels=channels, out_channels=mid_channels, data_format=data_format, name="init_fc") self.main_fcs = SimpleSequential(name="main_fcs") for i in range(num_layers - 1): self.main_fcs.children.append(DenseBlock( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="fc{}".format(i + 1))) self.final_fc = nn.Dense( units=channels, input_dim=mid_channels, name="final_fc") def call(self, x, training=None): input = x x = self.pool(x) x = self.flatten(x) x = self.init_fc(x) x = self.main_fcs(x, training=training) x = self.final_fc(x) if is_channels_first(self.data_format): x = tf.broadcast_to(tf.expand_dims(tf.expand_dims(x, 2), 3), shape=input.shape) else: x = tf.broadcast_to(tf.expand_dims(tf.expand_dims(x, 1), 2), shape=input.shape) return x class SpatialGate(nn.Layer): """ BAM spatial gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. num_dil_convs : int, default 2 Number of dilated convolutions. dilation : int, default 4 Dilation/padding value for corresponding convolutions. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, reduction_ratio=16, num_dil_convs=2, dilation=4, data_format="channels_last", **kwargs): super(SpatialGate, self).__init__(**kwargs) mid_channels = channels // reduction_ratio self.init_conv = conv1x1_block( in_channels=channels, out_channels=mid_channels, strides=1, use_bias=True, data_format=data_format, name="init_conv") self.dil_convs = SimpleSequential(name="dil_convs") for i in range(num_dil_convs): self.dil_convs.children.append(conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=1, padding=dilation, dilation=dilation, use_bias=True, data_format=data_format, name="conv{}".format(i + 1))) self.final_conv = conv1x1( in_channels=mid_channels, out_channels=1, strides=1, use_bias=True, data_format=data_format, name="final_conv") def call(self, x, training=None): input = x x = self.init_conv(x, training=training) x = self.dil_convs(x, training=training) x = self.final_conv(x) x = tf.broadcast_to(x, shape=input.shape) return x class BamBlock(nn.Layer): """ BAM attention block for BAM-ResNet. Parameters: ---------- channels : int Number of input/output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, data_format="channels_last", **kwargs): super(BamBlock, self).__init__(**kwargs) self.ch_att = ChannelGate( channels=channels, data_format=data_format, name="ch_att") self.sp_att = SpatialGate( channels=channels, data_format=data_format, name="sp_att") self.sigmoid = tf.nn.sigmoid def call(self, x, training=None): att = 1 + self.sigmoid(self.ch_att(x, training=training) * self.sp_att(x, training=training)) x = x * att return x class BamResUnit(nn.Layer): """ BAM-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, bottleneck, data_format="channels_last", **kwargs): super(BamResUnit, self).__init__(**kwargs) self.use_bam = (strides != 1) if self.use_bam: self.bam = BamBlock( channels=in_channels, data_format=data_format, name="bam") self.res_unit = ResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=False, data_format=data_format, name="res_unit") def call(self, x, training=None): if self.use_bam: x = self.bam(x, training=training) x = self.res_unit(x, training=training) return x class BamResNet(tf.keras.Model): """ BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(BamResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(BamResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_resnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create BAM-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. use_se : bool Whether to use SE block. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = BamResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def bam_resnet18(**kwargs): """ BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="bam_resnet18", **kwargs) def bam_resnet34(**kwargs): """ BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=34, model_name="bam_resnet34", **kwargs) def bam_resnet50(**kwargs): """ BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="bam_resnet50", **kwargs) def bam_resnet101(**kwargs): """ BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="bam_resnet101", **kwargs) def bam_resnet152(**kwargs): """ BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=152, model_name="bam_resnet152", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ bam_resnet18, bam_resnet34, bam_resnet50, bam_resnet101, bam_resnet152, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != bam_resnet18 or weight_count == 11712503) assert (model != bam_resnet34 or weight_count == 21820663) assert (model != bam_resnet50 or weight_count == 25915099) assert (model != bam_resnet101 or weight_count == 44907227) assert (model != bam_resnet152 or weight_count == 60550875) if __name__ == "__main__": _test()
15,973
30.757455
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/centernet.py
""" CenterNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850. """ __all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc', 'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco', 'CenterNetHeatmapMaxDet'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, conv1x1, conv3x3_block, DeconvBlock, Concurrent, SimpleSequential, is_channels_first from .resnet import resnet18, resnet50b, resnet101b class CenterNetDecoderUnit(nn.Layer): """ CenterNet decoder unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(CenterNetDecoderUnit, self).__init__(**kwargs) self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="conv") self.deconv = DeconvBlock( in_channels=out_channels, out_channels=out_channels, kernel_size=4, strides=2, padding=1, data_format=data_format, name="deconv") def call(self, x, training=None): x = self.conv(x, training=training) x = self.deconv(x, training=training) return x class CenterNetHeadBlock(nn.Layer): """ CenterNet simple head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(CenterNetHeadBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, use_bias=True, use_bn=False, data_format=data_format, name="conv1") self.conv2 = conv1x1( in_channels=in_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x) x = self.conv2(x) return x class CenterNetHeatmapBlock(nn.Layer): """ CenterNet heatmap block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. do_nms : bool Whether do NMS (or simply clip for training otherwise). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, do_nms, data_format="channels_last", **kwargs): super(CenterNetHeatmapBlock, self).__init__(**kwargs) self.do_nms = do_nms self.head = CenterNetHeadBlock( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="head") self.sigmoid = tf.nn.sigmoid if self.do_nms: self.pool = MaxPool2d( pool_size=3, strides=1, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.head(x) x = self.sigmoid(x) if self.do_nms: y = self.pool(x) x = x * (y.numpy() == x.numpy()) else: eps = 1e-4 x = tf.clip_by_value(x, clip_value_min=eps, clip_value_max=(1.0 - eps)) return x class CenterNetHeatmapMaxDet(nn.Layer): """ CenterNet decoder for heads (heatmap, wh, reg). Parameters: ---------- topk : int, default 40 Keep only `topk` detections. scale : int, default is 4 Downsampling scale factor. max_batch : int, default is 256 Maximal batch size. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, topk=40, scale=4, max_batch=256, data_format="channels_last", **kwargs): super(CenterNetHeatmapMaxDet, self).__init__(**kwargs) self.topk = topk self.scale = scale self.max_batch = max_batch self.data_format = data_format def call(self, x, training=None): import numpy as np x_ = x.numpy() if not is_channels_first(self.data_format): x_ = x_.transpose((0, 3, 1, 2)) heatmap = x_[:, :-4] wh = x_[:, -4:-2] reg = x_[:, -2:] batch, _, out_h, out_w = heatmap.shape heatmap_flat = heatmap.reshape((batch, -1)) indices = np.argsort(heatmap_flat)[:, -self.topk:] scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1) topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32) topk_indices = indices % (out_h * out_w) topk_ys = (topk_indices // out_w).astype(dtype=np.float32) topk_xs = (topk_indices % out_w).astype(dtype=np.float32) center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2)) wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2)) xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1) ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1) topk_xs = topk_xs + xs topk_ys = topk_ys + ys w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1) h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1) half_w = 0.5 * w half_h = 0.5 * h bboxes = tf.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1) bboxes = bboxes * self.scale topk_classes = tf.expand_dims(topk_classes, axis=-1) scores = tf.expand_dims(scores, axis=-1) result = tf.concat((bboxes, topk_classes, scores), axis=-1) return result class CenterNet(tf.keras.Model): """ CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. 
return_heatmap : bool, default False Whether to return only heatmap. topk : int, default 40 Keep only `topk` detections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (512, 512) Spatial size of the expected input image. classes : int, default 80 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, topk=40, in_channels=3, in_size=(512, 512), classes=80, data_format="channels_last", **kwargs): super(CenterNet, self).__init__(**kwargs) self.in_size = in_size self.in_channels = in_channels self.return_heatmap = return_heatmap self.data_format = data_format self.backbone = backbone self.backbone._name = "backbone" self.decoder = SimpleSequential(name="decoder") in_channels = backbone_out_channels for i, out_channels in enumerate(channels): self.decoder.add(CenterNetDecoderUnit( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="unit{}".format(i + 1))) in_channels = out_channels heads = Concurrent( data_format=data_format, name="heads") heads.add(CenterNetHeatmapBlock( in_channels=in_channels, out_channels=classes, do_nms=(not self.return_heatmap), data_format=data_format, name="heapmap_block")) heads.add(CenterNetHeadBlock( in_channels=in_channels, out_channels=2, data_format=data_format, name="wh_block")) heads.add(CenterNetHeadBlock( in_channels=in_channels, out_channels=2, data_format=data_format, name="reg_block")) self.decoder.add(heads) if not self.return_heatmap: self.heatmap_max_det = CenterNetHeatmapMaxDet( topk=topk, scale=4, data_format=data_format, name="heatmap_max_det") def call(self, x, training=None): x = self.backbone(x, training=training) x = self.decoder(x, training=training) if not self.return_heatmap or not tf.executing_eagerly(): x = self.heatmap_max_det(x) return x def get_centernet(backbone, backbone_out_channels, 
classes, model_name=None, data_format="channels_last", pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create CenterNet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. classes : int Number of classes. model_name : str or None, default None Model name for loading pretrained model. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. Returns: ------- HybridBlock A network. """ channels = [256, 128, 64] net = CenterNet( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, classes=classes, data_format=data_format, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def centernet_resnet18_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes, model_name="centernet_resnet18_voc", data_format=data_format, **kwargs) def centernet_resnet18_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes, model_name="centernet_resnet18_coco", data_format=data_format, **kwargs) def centernet_resnet50b_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet50b_voc", data_format=data_format, **kwargs) def centernet_resnet50b_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet50b_coco", data_format=data_format, **kwargs) def centernet_resnet101b_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 20 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" backbone = resnet101b(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet101b_voc", data_format=data_format, **kwargs) def centernet_resnet101b_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs): """ CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,' https://arxiv.org/abs/1904.07850. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 80 Number of classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnet101b(pretrained=pretrained_backbone).features del backbone.children[-1] return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes, model_name="centernet_resnet101b_coco", data_format=data_format, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" in_size = (512, 512) topk = 40 return_heatmap = False pretrained = False models = [ (centernet_resnet18_voc, 20), (centernet_resnet18_coco, 80), (centernet_resnet50b_voc, 20), (centernet_resnet50b_coco, 80), (centernet_resnet101b_voc, 20), (centernet_resnet101b_coco, 80), ] for model, classes in models: net = model(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (y.shape[0] == batch) if return_heatmap: if is_channels_first(data_format): assert (y.shape[1] == classes + 4) and 
(y.shape[2] == x.shape[2] // 4) and ( y.shape[3] == x.shape[3] // 4) else: assert (y.shape[3] == classes + 4) and (y.shape[1] == x.shape[1] // 4) and ( y.shape[2] == x.shape[2] // 4) else: assert (y.shape[1] == topk) and (y.shape[2] == 6) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != centernet_resnet18_voc or weight_count == 14215640) assert (model != centernet_resnet18_coco or weight_count == 14219540) assert (model != centernet_resnet50b_voc or weight_count == 30086104) assert (model != centernet_resnet50b_coco or weight_count == 30090004) assert (model != centernet_resnet101b_voc or weight_count == 49078232) assert (model != centernet_resnet101b_coco or weight_count == 49082132) if __name__ == "__main__": _test()
20,073
35.039497
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/proxylessnas_cub.py
""" ProxylessNAS for CUB-200-2011, implemented in TensorFlow. Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. """ __all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub'] from .common import is_channels_first from .proxylessnas import get_proxylessnas def proxylessnas_cpu_cub(classes=200, **kwargs): """ ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="cpu", model_name="proxylessnas_cpu_cub", **kwargs) def proxylessnas_gpu_cub(classes=200, **kwargs): """ ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="gpu", model_name="proxylessnas_gpu_cub", **kwargs) def proxylessnas_mobile_cub(classes=200, **kwargs): """ ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="mobile", model_name="proxylessnas_mobile_cub", **kwargs) def proxylessnas_mobile14_cub(classes=200, **kwargs): """ ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_proxylessnas(classes=classes, version="mobile14", model_name="proxylessnas_mobile14_cub", **kwargs) def _test(): import numpy as np import tensorflow as tf import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ proxylessnas_cpu_cub, proxylessnas_gpu_cub, proxylessnas_mobile_cub, proxylessnas_mobile14_cub, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 200)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != proxylessnas_cpu_cub or weight_count == 3215248) assert (model != proxylessnas_gpu_cub or weight_count == 5736648) assert (model != proxylessnas_mobile_cub or weight_count == 3055712) assert (model != proxylessnas_mobile14_cub or weight_count == 5423168) if __name__ == "__main__": _test()
4,145
34.741379
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/ibnresnet.py
""" IBN-ResNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. """ __all__ = ['IBNResNet', 'ibn_resnet50', 'ibn_resnet101', 'ibn_resnet152'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import Conv2d, BatchNorm, conv1x1_block, conv3x3_block, IBN, SimpleSequential, flatten, is_channels_first from .resnet import ResInitBlock class IBNConvBlock(nn.Layer): """ IBN-Net specific convolution block with BN/IBN normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_ibn : bool, default False Whether use Instance-Batch Normalization. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, use_ibn=False, activate=True, data_format="channels_last", **kwargs): super(IBNConvBlock, self).__init__(**kwargs) self.activate = activate self.use_ibn = use_ibn self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, name="conv") if self.use_ibn: self.ibn = IBN( channels=out_channels, data_format=data_format, name="ibn") else: self.bn = BatchNorm( data_format=data_format, name="bn") if self.activate: self.activ = nn.ReLU() def call(self, x, training=None): x = self.conv(x, training=training) if self.use_ibn: x = self.ibn(x, training=training) else: x = self.bn(x, training=training) if self.activate: x = self.activ(x) return x def ibn_conv1x1_block(in_channels, out_channels, strides=1, groups=1, use_bias=False, use_ibn=False, activate=True, data_format="channels_last", **kwargs): """ 1x1 version of the IBN-Net specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_ibn : bool, default False Whether use Instance-Batch Normalization. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return IBNConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=0, groups=groups, use_bias=use_bias, use_ibn=use_ibn, activate=activate, data_format=data_format, **kwargs) class IBNResBottleneck(nn.Layer): """ IBN-ResNet bottleneck block for residual path in IBN-ResNet unit. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, conv1_ibn, data_format="channels_last", **kwargs): super(IBNResBottleneck, self).__init__(**kwargs) mid_channels = out_channels // 4 self.conv1 = ibn_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_ibn=conv1_ibn, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class IBNResUnit(nn.Layer): """ IBN-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, conv1_ibn, data_format="channels_last", **kwargs): super(IBNResUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = IBNResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_ibn=conv1_ibn, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class IBNResNet(tf.keras.Model): """ IBN-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(IBNResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 conv1_ibn = (out_channels < 2048) stage.add(IBNResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_ibn=conv1_ibn, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_ibnresnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create IBN-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported IBN-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IBNResNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def ibn_resnet50(**kwargs): """ IBN-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_ibnresnet(blocks=50, model_name="ibn_resnet50", **kwargs) def ibn_resnet101(**kwargs): """ IBN-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_ibnresnet(blocks=101, model_name="ibn_resnet101", **kwargs) def ibn_resnet152(**kwargs): """ IBN-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_ibnresnet(blocks=152, model_name="ibn_resnet152", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ ibn_resnet50, ibn_resnet101, ibn_resnet152, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibn_resnet50 or weight_count == 25557032) assert (model != ibn_resnet101 or weight_count == 44549160) assert (model != ibn_resnet152 or weight_count == 60192808) if __name__ == "__main__": _test()
14,465
31.290179
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/common.py
""" Common routines for models in TensorFlow 2.0. """ __all__ = ['is_channels_first', 'get_channel_axis', 'round_channels', 'get_im_size', 'interpolate_im', 'BreakBlock', 'ReLU6', 'HSwish', 'PReLU2', 'get_activation_layer', 'flatten', 'MaxPool2d', 'AvgPool2d', 'GlobalAvgPool2d', 'BatchNorm', 'InstanceNorm', 'IBN', 'Conv1d', 'Conv2d', 'SelectableDense', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block', 'DeconvBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock', 'saconv3x3_block', 'PixelShuffle', 'DucBlock', 'Identity', 'SimpleSequential', 'ParametricSequential', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricConcurrent', 'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'NormActivation', 'InterpolationBlock', 'Hourglass', 'HeatmapMaxDetBlock'] import math from inspect import isfunction import numpy as np import tensorflow as tf import tensorflow.keras.layers as nn from tensorflow.python.keras import backend as K from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import initializers def is_channels_first(data_format): """ Is tested data format channels first. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- bool A flag. """ return data_format == "channels_first" def get_channel_axis(data_format): """ Get channel axis. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- int Channel axis. """ return 1 if is_channels_first(data_format) else -1 def round_channels(channels, divisor=8): """ Round weighted channel number (make divisible operation). 
Parameters: ---------- channels : int or float Original number of channels. divisor : int, default 8 Alignment value. Returns: ------- int Weighted number of channels. """ rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor) if float(rounded_channels) < 0.9 * channels: rounded_channels += divisor return rounded_channels def get_im_size(x, data_format): """ Get spatial size for a tensor. Parameters: ---------- x : tensor A tensor. data_format : str The ordering of the dimensions in the tensor. Returns: ------- (int, int) Size (height x width). """ x_shape = x.get_shape().as_list() return x_shape[2:4] if is_channels_first(data_format) else x_shape[1:3] def interpolate_im(x, scale_factor=1, out_size=None, data_format="channels_last"): """ Bilinear change spatial size for a tensor. Parameters: ---------- x : tensor A tensor. scale_factor : int, default 1 Multiplier for spatial size. out_size : tuple of 2 int, default None Spatial size of the output tensor for the bilinear upsampling operation. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- tensor Resulted tensor. """ if out_size is None: in_size = get_im_size(x, data_format=data_format) out_size = tuple(i * scale_factor for i in in_size) if scale_factor != 0 else in_size if is_channels_first(data_format): x = tf.transpose(x, perm=[0, 2, 3, 1]) x = tf.image.resize( images=x, size=out_size) if is_channels_first(data_format): x = tf.transpose(x, perm=[0, 3, 1, 2]) return x class BreakBlock(nn.Layer): """ Break coonnection block for hourglass. """ def __init__(self, **kwargs): super(BreakBlock, self).__init__(**kwargs) def call(self, x): return None def __repr__(self): return '{name}()'.format(name=self.__class__.__name__) class ReLU6(nn.Layer): """ ReLU6 activation layer. 
""" def __init__(self, **kwargs): super(ReLU6, self).__init__(**kwargs) def call(self, x): return tf.nn.relu6(x) class Swish(nn.Layer): """ Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941. """ def call(self, x): return x * tf.nn.sigmoid(x) class HSigmoid(nn.Layer): """ Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ def __init__(self, **kwargs): super(HSigmoid, self).__init__(**kwargs) def call(self, x): return tf.nn.relu6(x + 3.0) / 6.0 class HSwish(nn.Layer): """ H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244. """ def __init__(self, **kwargs): super(HSwish, self).__init__(**kwargs) def call(self, x): return x * tf.nn.relu6(x + 3.0) / 6.0 class PReLU2(nn.PReLU): """ Parametric leaky version of a Rectified Linear Unit (with wide alpha). Parameters: ---------- in_channels : int Number of input channels. alpha_initializer : tf.Initializer, default tf.constant_initializer(0.25) Initializer function for the weights. shared_axes : list of int, default None The axes along which to share learnable parameters for the activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels=1, alpha_initializer=tf.constant_initializer(0.25), data_format="channels_last", **kwargs): self.in_channels = in_channels self.data_format = data_format super(PReLU2, self).__init__( alpha_initializer=alpha_initializer, **kwargs) def build(self, input_shape): self.alpha = self.add_weight( shape=(self.in_channels,), name="alpha", initializer=self.alpha_initializer, regularizer=self.alpha_regularizer, constraint=self.alpha_constraint) channel_axis = (1 if is_channels_first(self.data_format) else len(input_shape) - 1) assert (self.in_channels == input_shape[channel_axis]) axes = {} for i in range(1, len(input_shape)): if i != channel_axis: axes[i] = input_shape[i] self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes=axes) self.built = True def call(self, x): if is_channels_first(self.data_format) and (len(x.shape.as_list()) == 4): x = tf.transpose(x, perm=[0, 2, 3, 1]) pos = K.relu(x) neg = -self.alpha * K.relu(-x) x = pos + neg if is_channels_first(self.data_format) and (len(x.shape.as_list()) == 4): x = tf.transpose(x, perm=[0, 3, 1, 2]) return x class Tanh(nn.Layer): """ Tanh activation function. """ def __init__(self, **kwargs): super(Tanh, self).__init__(**kwargs) def call(self, x): return tf.math.tanh(x) def get_activation_layer(activation, **kwargs): """ Create activation layer from string/function. Parameters: ---------- activation : function, or str, or nn.Layer Activation function or name of activation function. Returns: ------- nn.Layer Activation layer. 
""" assert (activation is not None) if isfunction(activation): return activation() elif isinstance(activation, str): if activation == "relu": return nn.ReLU(**kwargs) elif activation == "relu6": return ReLU6(**kwargs) elif activation == "prelu2": return PReLU2(**kwargs) elif activation == "swish": return Swish(**kwargs) elif activation == "hswish": return HSwish(**kwargs) elif activation == "sigmoid": return tf.nn.sigmoid elif activation == "hsigmoid": return HSigmoid(**kwargs) elif activation == "tanh": return Tanh(**kwargs) else: raise NotImplementedError() else: assert (isinstance(activation, nn.Layer)) return activation def flatten(x, data_format): """ Flattens the input to two dimensional. Parameters: ---------- x : Tensor Input tensor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- Tensor Resulted tensor. """ if not is_channels_first(data_format): x = tf.transpose(x, perm=(0, 3, 1, 2)) x = tf.reshape(x, shape=(-1, np.prod(x.get_shape().as_list()[1:]))) return x class MaxPool2d(nn.Layer): """ Max pooling operation for two dimensional (spatial) data. Parameters: ---------- pool_size : int or tuple/list of 2 int Size of the max pooling windows. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. ceil_mode : bool, default False When `True`, will use ceil instead of floor to compute the output shape. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, pool_size, strides, padding=0, ceil_mode=False, data_format="channels_last", **kwargs): super(MaxPool2d, self).__init__(**kwargs) if isinstance(pool_size, int): pool_size = (pool_size, pool_size) if isinstance(strides, int): strides = (strides, strides) if isinstance(padding, int): padding = (padding, padding) self.use_stride = (strides[0] > 1) or (strides[1] > 1) self.ceil_mode = ceil_mode and self.use_stride self.use_pad = (padding[0] > 0) or (padding[1] > 0) if self.ceil_mode: self.padding = padding self.pool_size = pool_size self.strides = strides self.data_format = data_format elif self.use_pad: if is_channels_first(data_format): self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2] else: self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]] self.pool = nn.MaxPooling2D( pool_size=pool_size, strides=strides, padding="valid", data_format=data_format) def call(self, x): if self.ceil_mode: x_shape = x.get_shape().as_list() if is_channels_first(self.data_format): height = x_shape[2] width = x_shape[3] else: height = x_shape[1] width = x_shape[2] padding = self.padding out_height = float(height + 2 * padding[0] - self.pool_size[0]) / self.strides[0] + 1.0 out_width = float(width + 2 * padding[1] - self.pool_size[1]) / self.strides[1] + 1.0 if math.ceil(out_height) > math.floor(out_height): padding = (padding[0] + 1, padding[1]) if math.ceil(out_width) > math.floor(out_width): padding = (padding[0], padding[1] + 1) if (padding[0] > 0) or (padding[1] > 0): if is_channels_first(self.data_format): paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2] else: paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]] x = tf.pad(x, paddings=paddings_tf) elif self.use_pad: x = tf.pad(x, paddings=self.paddings_tf) x = self.pool(x) return x class AvgPool2d(nn.Layer): """ Average pooling operation for two dimensional (spatial) data. 
Parameters: ---------- pool_size : int or tuple/list of 2 int Size of the max pooling windows. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. ceil_mode : bool, default False When `True`, will use ceil instead of floor to compute the output shape. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, pool_size, strides, padding=0, ceil_mode=False, data_format="channels_last", **kwargs): super(AvgPool2d, self).__init__(**kwargs) if isinstance(pool_size, int): pool_size = (pool_size, pool_size) if isinstance(strides, int): strides = (strides, strides) if isinstance(padding, int): padding = (padding, padding) self.use_stride = (strides[0] > 1) or (strides[1] > 1) self.ceil_mode = ceil_mode and self.use_stride self.use_pad = (padding[0] > 0) or (padding[1] > 0) if self.ceil_mode: self.padding = padding self.pool_size = pool_size self.strides = strides self.data_format = data_format elif self.use_pad: if is_channels_first(data_format): self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2] else: self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]] self.pool = nn.AveragePooling2D( pool_size=pool_size, strides=1, padding="valid", data_format=data_format, name="pool") if self.use_stride: self.stride_pool = nn.AveragePooling2D( pool_size=1, strides=strides, padding="valid", data_format=data_format, name="stride_pool") def call(self, x, training=None): if self.ceil_mode: x_shape = x.get_shape().as_list() if is_channels_first(self.data_format): height = x_shape[2] width = x_shape[3] else: height = x_shape[1] width = x_shape[2] padding = self.padding out_height = float(height + 2 * padding[0] - self.pool_size[0]) / self.strides[0] + 1.0 out_width = float(width + 2 * padding[1] - self.pool_size[1]) / self.strides[1] + 1.0 if math.ceil(out_height) > math.floor(out_height): padding = (padding[0] + 1, 
padding[1]) if math.ceil(out_width) > math.floor(out_width): padding = (padding[0], padding[1] + 1) if (padding[0] > 0) or (padding[1] > 0): if is_channels_first(self.data_format): paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2] else: paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]] x = tf.pad(x, paddings=paddings_tf) elif self.use_pad: x = tf.pad(x, paddings=self.paddings_tf) x = self.pool(x) if self.use_stride: x = self.stride_pool(x) return x class GlobalAvgPool2d(nn.GlobalAvgPool2D): """ Global average pooling. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(GlobalAvgPool2d, self).__init__(data_format=data_format, **kwargs) self.axis = get_channel_axis(data_format) def call(self, x, training=None): x = super(GlobalAvgPool2d, self).call(x, training) x = tf.expand_dims(tf.expand_dims(x, axis=self.axis), axis=self.axis) return x class BatchNorm(nn.BatchNormalization): """ MXNet/Gluon-like batch normalization. Parameters: ---------- momentum : float, default 0.9 Momentum for the moving average. epsilon : float, default 1e-5 Small float added to variance to avoid dividing by zero. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, momentum=0.9, epsilon=1e-5, data_format="channels_last", **kwargs): super(BatchNorm, self).__init__( axis=get_channel_axis(data_format), momentum=momentum, epsilon=epsilon, **kwargs) class InstanceNorm(nn.Layer): """ MXNet/Gluon-like instance normalization layer as in 'Instance Normalization: The Missing Ingredient for Fast Stylization' (https://arxiv.org/abs/1607.08022). On the base of `tensorflow_addons` implementation. Parameters: ---------- epsilon : float, default 1e-5 Small float added to variance to avoid dividing by zero. center : bool, default True If True, add offset of `beta` to normalized tensor. 
If False, `beta` is ignored. scale : bool, default False If True, multiply by `gamma`. If False, `gamma` is not used. beta_initializer : str, default 'zeros' Initializer for the beta weight. gamma_initializer : str, default 'ones' Initializer for the gamma weight. beta_regularizer : object or None, default None Optional regularizer for the beta weight. gamma_regularizer : object or None, default None Optional regularizer for the gamma weight. beta_constraint : object or None, default None Optional constraint for the beta weight. gamma_constraint : object or None, default None Optional constraint for the gamma weight. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, epsilon=1e-5, center=True, scale=False, beta_initializer="zeros", gamma_initializer="ones", beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, data_format="channels_last", **kwargs): super(InstanceNorm, self).__init__(**kwargs) self.supports_masking = True self.groups = -1 self.axis = get_channel_axis(data_format) self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = tf.keras.initializers.get(beta_initializer) self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) self.beta_constraint = tf.keras.constraints.get(beta_constraint) self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super(InstanceNorm, self).build(input_shape) def call(self, inputs): input_shape = 
tf.keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) outputs = tf.reshape(normalized_inputs, tensor_input_shape) return outputs def get_config(self): config = { "groups": self.groups, "axis": self.axis, "epsilon": self.epsilon, "center": self.center, "scale": self.scale, "beta_initializer": tf.keras.initializers.serialize(self.beta_initializer), "gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer), "beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer), "gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer), "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), "gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint) } base_config = super(InstanceNorm, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(self.axis, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return reshaped_inputs, group_shape def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = tf.keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(1, len(group_shape))) axis = -2 if self.axis == -1 else self.axis - 1 group_reduction_axes.pop(axis) mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, 
variance_epsilon=self.epsilon) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError("Axis {} of input tensor should have a defined dimension but the layer received an input " "with shape {}".format(self.axis, input_shape)) def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError("Number of groups ({}) cannot be more than the number of channels ({})".format( self.groups, dim)) if (dim % self.groups) != 0: raise ValueError('Number of groups ({}) must be a multiple of the number of channels ({})'.format( self.groups, dim)) def _check_axis(self): if self.axis == 0: raise ValueError("You are trying to normalize your batch axis. 
Do you want to use " "tf.layer.batch_normalization instead") def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = tf.keras.layers.InputSpec( ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name="gamma", initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name="beta", initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(self.axis, self.groups) return broadcast_shape class IBN(nn.Layer): """ Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : int Number of channels. inst_fraction : float, default 0.5 The first fraction of channels for normalization. inst_first : bool, default True Whether instance normalization be on the first part of channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, first_fraction=0.5, inst_first=True, data_format="channels_last", **kwargs): super(IBN, self).__init__(**kwargs) self.inst_first = inst_first self.data_format = data_format h1_channels = int(math.floor(channels * first_fraction)) h2_channels = channels - h1_channels self.split_sections = [h1_channels, h2_channels] if self.inst_first: self.inst_norm = InstanceNorm( scale=True, data_format=data_format, name="inst_norm") self.batch_norm = BatchNorm( data_format=data_format, name="batch_norm") else: self.batch_norm = BatchNorm( data_format=data_format, name="batch_norm") self.inst_norm = InstanceNorm( scale=True, data_format=data_format, name="inst_norm") def call(self, x, training=None): axis = get_channel_axis(self.data_format) x1, x2 = tf.split(x, num_or_size_splits=self.split_sections, axis=axis) if self.inst_first: x1 = self.inst_norm(x1, training=training) x2 = self.batch_norm(x2, training=training) else: x1 = self.batch_norm(x1, training=training) x2 = self.inst_norm(x2, training=training) x = tf.concat([x1, x2], axis=axis) return x class Conv1d(nn.Layer): """ Standard 1D convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. strides : int, default 1 Strides of the convolution. padding : int, default 0 Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default True Whether the layer uses a bias vector. force_same : bool, default False Whether to forcibly set `same` padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides=1, padding=0, dilation=1, groups=1, use_bias=True, force_same=False, data_format="channels_last", **kwargs): super(Conv1d, self).__init__(**kwargs) assert (in_channels is not None) assert (not force_same) or ((padding == kernel_size // 2) and (strides == 1) and (dilation == 1)) self.use_conv = (groups == 1) self.use_dw_conv = (groups > 1) and (groups == out_channels) and (out_channels == in_channels) self.data_format = data_format self.use_pad = (padding > 0) and (not force_same) if self.use_pad: self.pad = nn.ZeroPadding1D(padding=padding) if self.use_conv: self.conv = nn.Conv1D( filters=out_channels, kernel_size=kernel_size, strides=strides, padding=("valid" if not force_same else "same"), data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="conv") elif self.use_dw_conv: self.dw_conv = nn.DepthwiseConv2D( kernel_size=(kernel_size, 1), strides=strides, padding=("valid" if not force_same else "same"), data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="dw_conv") def call(self, x): if self.use_pad: if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 2, 1)) x = self.pad(x) if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 2, 1)) if self.use_conv: x = self.conv(x) elif self.use_dw_conv: if is_channels_first(self.data_format): x = tf.expand_dims(x, axis=3) else: x = tf.expand_dims(x, axis=2) x = self.dw_conv(x) if is_channels_first(self.data_format): x = tf.squeeze(x, axis=3) else: x = tf.squeeze(x, axis=2) return x class Conv2d(nn.Layer): """ Standard convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. 
dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default True Whether the layer uses a bias vector. force_same : bool, default False Whether to forcibly set `same` padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides=1, padding=0, dilation=1, groups=1, use_bias=True, force_same=False, data_format="channels_last", **kwargs): super(Conv2d, self).__init__(**kwargs) assert (in_channels is not None) self.data_format = data_format self.use_conv = (groups == 1) self.use_dw_conv = (groups > 1) and (groups == out_channels) and (out_channels == in_channels) # assert (strides == 1) or (dilation == 1) if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) if isinstance(strides, int): strides = (strides, strides) if isinstance(padding, int): padding = (padding, padding) if isinstance(dilation, int): dilation = (dilation, dilation) self.use_pad = ((padding[0] > 0) or (padding[1] > 0)) and (not force_same) if self.use_pad: self.pad = nn.ZeroPadding2D( padding=padding, data_format=data_format) # if is_channels_first(data_format): # self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)] # else: # self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]] # self.use_post_pad = (dilation[0] > 1) and (dilation[0] % 2 == 1) and (dilation[0] == dilation[1]) and\ # (dilation[0] == padding[1]) and (padding[0] == padding[1]) # if self.use_post_pad: # self.post_pad = nn.ZeroPadding2D( # padding=((1, 0), (1, 0)), # data_format=data_format) assert (not force_same) or ((padding[0] == kernel_size[0] // 2) and (padding[1] == kernel_size[1] // 2) and (strides[0] == 1) and (strides[0] == strides[1]) and (dilation[0] == 1) and (dilation[0] == dilation[1])) if self.use_conv: self.conv = nn.Conv2D( filters=out_channels, kernel_size=kernel_size, 
strides=strides, padding=("valid" if not force_same else "same"), data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="conv") elif self.use_dw_conv: # assert (dilation[0] == 1) and (dilation[1] == 1) self.dw_conv = nn.DepthwiseConv2D( kernel_size=kernel_size, strides=strides, padding=("valid" if not force_same else "same"), data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="dw_conv") else: assert (groups > 1) assert (in_channels % groups == 0) assert (out_channels % groups == 0) self.groups = groups self.convs = [] for i in range(groups): self.convs.append(nn.Conv2D( filters=(out_channels // groups), kernel_size=kernel_size, strides=strides, padding="valid", data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="convgroup{}".format(i + 1))) def call(self, x): if self.use_pad: x = self.pad(x) # x = tf.pad(x, paddings=self.paddings_tf) if self.use_conv: try: x = self.conv(x) except tf.errors.InvalidArgumentError as ex: if self.conv.dilation_rate != (1, 1): conv_ = nn.Conv2D( filters=self.conv.filters, kernel_size=self.conv.kernel_size, strides=self.conv.strides, padding="valid", data_format=self.data_format, dilation_rate=self.conv.dilation_rate, use_bias=self.conv.use_bias, name="conv_") _ = conv_(x) conv_.weights[0].assign(self.conv.weights[0]) if len(self.conv.weights) > 1: conv_.weights[1].assign(self.conv.weights[1]) x = conv_(x) else: raise ex # x = self.conv(x) elif self.use_dw_conv: x = self.dw_conv(x) else: yy = [] xx = tf.split(x, num_or_size_splits=self.groups, axis=get_channel_axis(self.data_format)) for xi, convi in zip(xx, self.convs): yy.append(convi(xi)) x = tf.concat(yy, axis=get_channel_axis(self.data_format)) # if self.use_post_pad: # x = self.post_pad(x) return x class SelectableDense(nn.Layer): """ Selectable dense layer. Parameters: ---------- in_channels : int Number of input features. out_channels : int Number of output features. 
use_bias : bool, default False Whether the layer uses a bias vector. weight_initializer : str or `Initializer`, default 'glorot_uniform' Initializer for the `kernel` weights matrix. bias_initializer: str or `Initializer` Initializer for the bias vector. num_options : int, default 1 Number of selectable options. """ def __init__(self, in_channels, out_channels, use_bias=False, weight_initializer="glorot_uniform", bias_initializer="zeros", num_options=1, **kwargs): super(SelectableDense, self).__init__(**kwargs) self.in_channels = in_channels self.out_channels = out_channels self.use_bias = use_bias self.num_options = num_options self.weight_initializer = initializers.get(weight_initializer) self.bias_initializer = initializers.get(bias_initializer) self.supports_masking = True self.input_spec = tf.keras.layers.InputSpec(min_ndim=2) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) last_dim = tensor_shape.dimension_value(input_shape[-1]) self.input_spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: last_dim}) self.weight = self.add_weight( "weight", shape=[self.num_options, self.out_channels, self.in_channels], initializer=self.weight_initializer, regularizer=None, constraint=None, dtype=self.dtype, trainable=True) if self.use_bias: self.bias = self.add_weight( "bias", shape=[self.num_options, self.out_channels], initializer=self.bias_initializer, regularizer=None, constraint=None, dtype=self.dtype, trainable=True) else: self.bias = None self.built = True def call(self, x, indices): weight = tf.gather(self.weight.value(), indices=indices, axis=0) x = tf.expand_dims(x, axis=-1) x = tf.keras.backend.batch_dot(weight, x) x = tf.squeeze(x, axis=-1) if self.use_bias: bias = tf.gather(self.bias.value(), indices=indices, axis=0) x += bias return x def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape) input_shape = input_shape.with_rank_at_least(2) return 
input_shape[:-1].concatenate(self.out_channels) def get_config(self): config = { "in_channels": self.in_channels, "out_channels": self.out_channels, "use_bias": self.use_bias, "num_options": self.num_options, "weight_initializer": initializers.serialize(self.weight_initializer), "bias_initializer": initializers.serialize(self.bias_initializer), } base_config = super(SelectableDense, self).get_config() return dict(list(base_config.items()) + list(config.items())) class DenseBlock(nn.Layer): """ Standard dense block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input features. out_channels : int Number of output features. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(DenseBlock, self).__init__(**kwargs) self.activate = (activation is not None) self.use_bn = use_bn self.fc = nn.Dense( units=out_channels, use_bias=use_bias, input_dim=in_channels, name="fc") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") def call(self, x, training=None): x = self.fc(x) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) return x class ConvBlock1d(nn.Layer): """ Standard 1D convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. 
strides : int or Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. force_same : bool, default False Whether to forcibly set `same` padding in convolution. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, force_same=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(ConvBlock1d, self).__init__(**kwargs) assert (in_channels is not None) self.activate = (activation is not None) self.use_bn = use_bn self.conv = Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, force_same=force_same, data_format=data_format, name="conv") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") def call(self, x, training=None): x = self.conv(x) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) return x def conv1x1(in_channels, out_channels, strides=1, groups=1, use_bias=False, data_format="channels_last", **kwargs): """ Convolution 1x1 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. 
use_bias : bool, default False Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, groups=groups, use_bias=use_bias, data_format=data_format, **kwargs) def conv3x3(in_channels, out_channels, strides=1, padding=1, dilation=1, groups=1, use_bias=False, data_format="channels_last", **kwargs): """ Convolution 3x3 layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, **kwargs) def depthwise_conv3x3(channels, strides=1, padding=1, dilation=1, use_bias=False, data_format="channels_last", **kwargs): """ Depthwise convolution 3x3 layer. Parameters: ---------- channels : int Number of input/output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return Conv2d( in_channels=channels, out_channels=channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, groups=channels, use_bias=use_bias, data_format=data_format, **kwargs) class ConvBlock(nn.Layer): """ Standard convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. force_same : bool, default False Whether to forcibly set `same` padding in convolution. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, force_same=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(ConvBlock, self).__init__(**kwargs) assert (in_channels is not None) self.activate = (activation is not None) self.use_bn = use_bn self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, force_same=force_same, data_format=data_format, name="conv") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") def call(self, x, training=None): x = self.conv(x) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) return x def conv1x1_block(in_channels, out_channels, strides=1, padding=0, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 1x1 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=padding, groups=groups, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def conv3x3_block(in_channels, out_channels, strides=1, padding=1, dilation=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 3x3 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def conv5x5_block(in_channels, out_channels, strides=1, padding=2, dilation=1, groups=1, use_bias=False, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 5x5 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. 
padding : int or tuple/list of 2 int, default 2 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=5, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def conv7x7_block(in_channels, out_channels, strides=1, padding=3, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 7x7 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 3 Padding value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=strides, padding=padding, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def dwconv_block(in_channels, out_channels, kernel_size, strides, padding, dilation=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ Depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=out_channels, use_bias=use_bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def dwconv3x3_block(in_channels, out_channels, strides=1, padding=1, dilation=1, use_bias=False, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 3x3 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. 
padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, use_bias=use_bias, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) def dwconv5x5_block(in_channels, out_channels, strides=1, padding=2, dilation=1, use_bias=False, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): """ 5x5 depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 2 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=5, strides=strides, padding=padding, dilation=dilation, use_bias=use_bias, bn_eps=bn_eps, activation=activation, data_format=data_format, **kwargs) class DwsConvBlock(nn.Layer): """ Depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. dw_force_same : bool, default False Whether to forcibly set `same` padding in depthwise convolution block. pw_force_same : bool, default False Whether to forcibly set `same` padding in pointwise convolution block. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default 'relu' Activation function after the depthwise convolution block. pw_activation : function or str or None, default 'relu' Activation function after the pointwise convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, use_bias=False, dw_force_same=False, pw_force_same=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation="relu", pw_activation="relu", data_format="channels_last", **kwargs): super(DwsConvBlock, self).__init__(**kwargs) self.dw_conv = dwconv_block( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, use_bias=use_bias, force_same=dw_force_same, use_bn=dw_use_bn, bn_eps=bn_eps, activation=dw_activation, data_format=data_format, name="dw_conv") self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, force_same=pw_force_same, use_bn=pw_use_bn, bn_eps=bn_eps, activation=pw_activation, data_format=data_format, name="pw_conv") def call(self, x, training=None): x = self.dw_conv(x, training=training) x = self.pw_conv(x, training=training) return x def dwsconv3x3_block(in_channels, out_channels, strides=1, padding=1, dilation=1, use_bias=False, bn_eps=1e-5, dw_activation="relu", pw_activation="relu", data_format="channels_last", **kwargs): """ 3x3 depthwise separable version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default 'relu' Activation function after the depthwise convolution block. pw_activation : function or str or None, default 'relu' Activation function after the pointwise convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, use_bias=use_bias, bn_eps=bn_eps, dw_activation=dw_activation, pw_activation=pw_activation, data_format=data_format, **kwargs) class PreConvBlock(nn.Layer): """ Convolution block with Batch normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, use_bn=True, return_preact=False, activate=True, data_format="channels_last", **kwargs): super(PreConvBlock, self).__init__(**kwargs) self.return_preact = return_preact self.activate = activate self.use_bn = use_bn if self.use_bn: self.bn = BatchNorm( data_format=data_format, name="bn") if self.activate: self.activ = nn.ReLU() self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, name="conv") def call(self, x, training=None): if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_conv1x1_block(in_channels, out_channels, strides=1, use_bias=False, use_bn=True, return_preact=False, activate=True, data_format="channels_last", **kwargs): """ 1x1 version of the pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return PreConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=0, use_bias=use_bias, use_bn=use_bn, return_preact=return_preact, activate=activate, data_format=data_format, **kwargs) def pre_conv3x3_block(in_channels, out_channels, strides=1, padding=1, dilation=1, groups=1, use_bias=False, use_bn=True, return_preact=False, activate=True, data_format="channels_last", **kwargs): """ 3x3 version of the pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return PreConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, use_bn=use_bn, return_preact=return_preact, activate=activate, data_format=data_format, **kwargs) class Deconv2d(nn.Layer): """ Standard deconvolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. 
out_padding : int or tuple/list of 2 int, default 0 Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default True Whether the layer uses a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides=1, padding=0, out_padding=0, dilation=1, groups=1, use_bias=True, data_format="channels_last", **kwargs): super(Deconv2d, self).__init__(**kwargs) assert (dilation == 1) assert (groups == 1) assert (in_channels is not None) if isinstance(padding, int): padding = (padding, padding) self.use_crop = (padding[0] > 0) or (padding[1] > 0) if self.use_crop: self.crop = nn.Cropping2D( cropping=padding, data_format=data_format, name="crop") self.conv = nn.Conv2DTranspose( filters=out_channels, kernel_size=kernel_size, strides=strides, padding="valid", output_padding=out_padding, data_format=data_format, dilation_rate=dilation, use_bias=use_bias, name="conv") def call(self, x): x = self.conv(x) if self.use_crop: x = self.crop(x) return x class DeconvBlock(nn.Layer): """ Deconvolution block with batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the deconvolution. padding : int or tuple/list of 2 int Padding value for deconvolution layer. out_padding : int or tuple/list of 2 int, default 0 Output padding value for deconvolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for deconvolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. 
bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, out_padding=0, dilation=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(DeconvBlock, self).__init__(**kwargs) assert (in_channels is not None) self.activate = (activation is not None) self.use_bn = use_bn self.conv = Deconv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, out_padding=out_padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, name="conv") if self.use_bn: self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") if self.activate: self.activ = get_activation_layer(activation, name="activ") def call(self, x, training=None): x = self.conv(x) if self.use_bn: x = self.bn(x, training=training) if self.activate: x = self.activ(x) return x def channel_shuffle(x, groups, data_format): """ Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. Parameters: ---------- x : Tensor Input tensor. groups : int Number of groups. data_format : str The ordering of the dimensions in tensors. Returns: ------- Tensor Resulted tensor. 
""" x_shape = x.get_shape().as_list() if is_channels_first(data_format): channels = x_shape[1] height = x_shape[2] width = x_shape[3] else: height = x_shape[1] width = x_shape[2] channels = x_shape[3] assert (channels % groups == 0) channels_per_group = channels // groups if is_channels_first(data_format): x = tf.reshape(x, shape=(-1, groups, channels_per_group, height, width)) x = tf.transpose(x, perm=(0, 2, 1, 3, 4)) x = tf.reshape(x, shape=(-1, channels, height, width)) else: x = tf.reshape(x, shape=(-1, height, width, groups, channels_per_group)) x = tf.transpose(x, perm=(0, 1, 2, 4, 3)) x = tf.reshape(x, shape=(-1, height, width, channels)) return x class ChannelShuffle(nn.Layer): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, groups, data_format="channels_last", **kwargs): super(ChannelShuffle, self).__init__(**kwargs) assert (channels % groups == 0) self.groups = groups self.data_format = data_format def call(self, x): return channel_shuffle(x, groups=self.groups, data_format=self.data_format) def channel_shuffle2(x, channels_per_group, data_format): """ Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,' https://arxiv.org/abs/1707.01083. The alternative version. Parameters: ---------- x : Tensor Input tensor. channels_per_group : int Number of groups. data_format : str Number of channels per group. Returns: ------- keras.Tensor Resulted tensor. 
""" x_shape = x.get_shape().as_list() if is_channels_first(data_format): channels = x_shape[1] height = x_shape[2] width = x_shape[3] else: height = x_shape[1] width = x_shape[2] channels = x_shape[3] assert (channels % channels_per_group == 0) groups = channels // channels_per_group if is_channels_first(data_format): x = tf.reshape(x, shape=(-1, channels_per_group, groups, height, width)) x = tf.transpose(x, perm=(0, 2, 1, 3, 4)) x = tf.reshape(x, shape=(-1, channels, height, width)) else: x = tf.reshape(x, shape=(-1, height, width, channels_per_group, groups)) x = tf.transpose(x, perm=(0, 1, 2, 4, 3)) x = tf.reshape(x, shape=(-1, height, width, channels)) return x class ChannelShuffle2(nn.Layer): """ Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups. The alternative version. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, groups, data_format="channels_last", **kwargs): super(ChannelShuffle2, self).__init__(**kwargs) assert (channels % groups == 0) self.channels_per_group = channels // groups self.data_format = data_format def call(self, x): return channel_shuffle2(x, channels_per_group=self.channels_per_group, data_format=self.data_format) class SEBlock(nn.Layer): """ Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : int Number of channels. reduction : int, default 16 Squeeze reduction value. mid_channels : int or None, default None Number of middle channels. round_mid : bool, default False Whether to round middle channel number (make divisible by 8). use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. activation : function, or str, or nn.Layer, default 'relu' Activation function after the first convolution. 
out_activation : function, or str, or nn.Layer, default 'sigmoid' Activation function after the last convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, reduction=16, mid_channels=None, round_mid=False, use_conv=True, mid_activation="relu", out_activation="sigmoid", data_format="channels_last", **kwargs): super(SEBlock, self).__init__(**kwargs) self.use_conv = use_conv self.data_format = data_format if mid_channels is None: mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction) self.pool = nn.GlobalAveragePooling2D( data_format=data_format, name="pool") if use_conv: self.conv1 = conv1x1( in_channels=channels, out_channels=mid_channels, use_bias=True, data_format=data_format, name="conv1") else: self.fc1 = nn.Dense( units=mid_channels, input_dim=channels, name="fc1") self.activ = get_activation_layer(mid_activation, name="activ") if use_conv: self.conv2 = conv1x1( in_channels=mid_channels, out_channels=channels, use_bias=True, data_format=data_format, name="conv2") else: self.fc2 = nn.Dense( units=channels, input_dim=mid_channels, name="fc2") self.sigmoid = get_activation_layer(out_activation, name="sigmoid") def call(self, x, training=None): w = self.pool(x) if self.use_conv: axis = -1 if is_channels_first(self.data_format) else 1 w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis) w = self.conv1(w) if self.use_conv else self.fc1(w) w = self.activ(w) w = self.conv2(w) if self.use_conv else self.fc2(w) w = self.sigmoid(w) if not self.use_conv: axis = -1 if is_channels_first(self.data_format) else 1 w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis) x = x * w return x class SABlock(nn.Layer): """ Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- out_channels : int Number of output channels. 
groups : int Number of channel groups (cardinality, without radix). radix : int Number of splits within a cardinal group. reduction : int, default 4 Squeeze reduction value. min_channels : int, default 32 Minimal number of squeezed channels. use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, out_channels, groups, radix, reduction=4, min_channels=32, use_conv=True, bn_eps=1e-5, data_format="channels_last", **kwargs): super(SABlock, self).__init__(**kwargs) self.groups = groups self.radix = radix self.use_conv = use_conv self.data_format = data_format self.axis = get_channel_axis(data_format) in_channels = out_channels * radix mid_channels = max(in_channels // reduction, min_channels) self.pool = nn.GlobalAveragePooling2D( data_format=data_format, name="pool") if use_conv: self.conv1 = conv1x1( in_channels=out_channels, out_channels=mid_channels, use_bias=True, data_format=data_format, name="conv1") else: self.fc1 = nn.Dense( units=mid_channels, input_dim=out_channels, name="fc1") self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") self.activ = nn.ReLU() if use_conv: self.conv2 = conv1x1( in_channels=mid_channels, out_channels=in_channels, use_bias=True, data_format=data_format, name="conv2") else: self.fc2 = nn.Dense( units=in_channels, input_dim=mid_channels, name="fc2") self.softmax = nn.Softmax(axis=1) def call(self, x, training=None): x_shape = x.get_shape().as_list() # batch = x_shape[0] if is_channels_first(self.data_format): channels = x_shape[1] height = x_shape[2] width = x_shape[3] x = tf.reshape(x, shape=(-1, self.radix, channels // self.radix, height, width)) w = tf.math.reduce_sum(x, axis=1) else: height = x_shape[1] width = x_shape[2] channels = x_shape[3] x = tf.reshape(x, shape=(-1, height, width, 
self.radix, channels // self.radix)) w = tf.math.reduce_sum(x, axis=-2) w = self.pool(w) if self.use_conv: axis = -1 if is_channels_first(self.data_format) else 1 w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis) w = self.conv1(w) if self.use_conv else self.fc1(w) w = self.bn(w, training=training) w = self.activ(w) w = self.conv2(w) if self.use_conv else self.fc2(w) w = tf.reshape(w, shape=(-1, self.groups, self.radix, channels // self.groups // self.radix)) w = tf.transpose(w, perm=(0, 2, 1, 3)) w = self.softmax(w) if is_channels_first(self.data_format): w = tf.reshape(w, shape=(-1, self.radix, channels // self.radix, 1, 1)) else: w = tf.reshape(w, shape=(-1, 1, 1, self.radix, channels // self.radix)) x = x * w if is_channels_first(self.data_format): x = tf.math.reduce_sum(x, axis=1) else: x = tf.math.reduce_sum(x, axis=-2) return x class SAConvBlock(nn.Layer): """ Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. force_same : bool, default False Whether to forcibly set `same` padding in convolution. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. radix : int, default 2 Number of splits within a cardinal group. reduction : int, default 4 Squeeze reduction value. 
min_channels : int, default 32 Minimal number of squeezed channels. use_conv : bool, default True Whether to convolutional layers instead of fully-connected ones. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, force_same=False, use_bn=True, bn_eps=1e-5, activation="relu", radix=2, reduction=4, min_channels=32, use_conv=True, data_format="channels_last", **kwargs): super(SAConvBlock, self).__init__(**kwargs) self.conv = ConvBlock( in_channels=in_channels, out_channels=(out_channels * radix), kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=(groups * radix), use_bias=use_bias, force_same=force_same, use_bn=use_bn, bn_eps=bn_eps, activation=activation, data_format=data_format, name="conv") self.att = SABlock( out_channels=out_channels, groups=groups, radix=radix, reduction=reduction, min_channels=min_channels, use_conv=use_conv, bn_eps=bn_eps, data_format=data_format, name="att") def call(self, x, training=None): x = self.conv(x, training=training) x = self.att(x, training=training) return x def saconv3x3_block(in_channels, out_channels, strides=1, padding=1, **kwargs): """ 3x3 version of the Split-Attention convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. """ return SAConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=padding, **kwargs) class PixelShuffle(nn.Layer): """ Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network,' https://arxiv.org/abs/1609.05158. 
Parameters: ---------- scale_factor : int Multiplier for spatial size. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, scale_factor, data_format="channels_last", **kwargs): super(PixelShuffle, self).__init__(**kwargs) self.scale_factor = scale_factor self.data_format = data_format def call(self, x, training=None): if not tf.executing_eagerly(): return PixelShuffle2(scale_factor=self.scale_factor, data_format=self.data_format)(x) f1 = self.scale_factor f2 = self.scale_factor x_shape = x.get_shape().as_list() if is_channels_first(self.data_format): channels = x_shape[1] height = x_shape[2] width = x_shape[3] else: height = x_shape[1] width = x_shape[2] channels = x_shape[3] assert (channels % f1 % f2 == 0) new_channels = channels // f1 // f2 if is_channels_first(self.data_format): x = tf.reshape(x, shape=(-1, new_channels, f1 * f2, height, width)) x = tf.reshape(x, shape=(-1, new_channels, f1, f2, height, width)) x = tf.transpose(x, perm=(0, 1, 4, 2, 5, 3)) x = tf.reshape(x, shape=(-1, new_channels, height * f1, width * f2)) else: x = tf.reshape(x, shape=(-1, height, width, new_channels, f1 * f2)) x = tf.reshape(x, shape=(-1, height, width, new_channels, f1, f2)) x = tf.transpose(x, perm=(0, 1, 4, 2, 5, 3)) x = tf.reshape(x, shape=(-1, height * f1, width * f2, new_channels)) return x class PixelShuffle2(nn.Layer): """ Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network,' https://arxiv.org/abs/1609.05158. Alternative implementation. Parameters: ---------- scale_factor : int Multiplier for spatial size. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, scale_factor, data_format="channels_last", **kwargs): super(PixelShuffle2, self).__init__(**kwargs) self.scale_factor = scale_factor self.data_format = data_format def call(self, x, training=None): tf_data_format = "NCHW" if is_channels_first(self.data_format) else "NHWC" x = tf.nn.depth_to_space(input=x, block_size=self.scale_factor, data_format=tf_data_format) return x class DucBlock(nn.Layer): """ Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,' https://arxiv.org/abs/1702.08502. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : int Multiplier for spatial size. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, scale_factor, data_format="channels_last", **kwargs): super(DucBlock, self).__init__(**kwargs) mid_channels = (scale_factor * scale_factor) * out_channels self.conv = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv") self.pix_shuffle = PixelShuffle( scale_factor=scale_factor, data_format=data_format, name="pix_shuffle") def call(self, x, training=None): x = self.conv(x, training=training) x = self.pix_shuffle(x) return x class Identity(nn.Layer): """ Identity layer. """ def __init__(self, **kwargs): super(Identity, self).__init__(**kwargs) def call(self, x, training=None): return x class SimpleSequential(nn.Layer): """ A sequential layer that can be used instead of tf.keras.Sequential. 
""" def __init__(self, **kwargs): super(SimpleSequential, self).__init__(**kwargs) self.children = [] def __getitem__(self, i): return self.children[i] def __len__(self): return len(self.children) def add(self, layer): layer._name = "{}/{}".format(self.name, layer._name) self.children.append(layer) def call(self, x, training=None): for block in self.children: x = block(x, training=training) return x class ParametricSequential(SimpleSequential): """ A sequential container for layers with parameters. Layers will be executed in the order they are added. """ def __init__(self, **kwargs): super(ParametricSequential, self).__init__(**kwargs) def call(self, x, **kwargs): for block in self.children: x = block(x, **kwargs) return x class DualPathSequential(SimpleSequential): """ A sequential container for layers with dual inputs/outputs. Layers will be executed in the order they are added. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first layers with single input/output. last_ordinals : int, default 0 Number of the final layers with single input/output. dual_path_scheme : function Scheme of dual path response for a layer. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal layer. 
""" def __init__(self, return_two=True, first_ordinals=0, last_ordinals=0, dual_path_scheme=(lambda block, x1, x2, training: block(x1, x2, training)), dual_path_scheme_ordinal=(lambda block, x1, x2, training: (block(x1, training), x2)), **kwargs): super(DualPathSequential, self).__init__(**kwargs) self.return_two = return_two self.first_ordinals = first_ordinals self.last_ordinals = last_ordinals self.dual_path_scheme = dual_path_scheme self.dual_path_scheme_ordinal = dual_path_scheme_ordinal def call(self, x1, x2=None, training=None): length = len(self.children) for i, block in enumerate(self.children): if (i < self.first_ordinals) or (i >= length - self.last_ordinals): x1, x2 = self.dual_path_scheme_ordinal(block, x1, x2, training) else: x1, x2 = self.dual_path_scheme(block, x1, x2, training) if self.return_two: return x1, x2 else: return x1 class Concurrent(SimpleSequential): """ A container for concatenation of layers. Parameters: ---------- stack : bool, default False Whether to concatenate tensors along a new dimension. merge_type : str, default None Type of branch merging. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, stack=False, merge_type=None, data_format="channels_last", **kwargs): super(Concurrent, self).__init__(**kwargs) assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"]) self.axis = get_channel_axis(data_format) if merge_type is not None: self.merge_type = merge_type else: self.merge_type = "stack" if stack else "cat" def call(self, x, training=None): out = [] for block in self.children: out.append(block(x, training=training)) if self.merge_type == "stack": out = tf.stack(out, axis=self.axis) elif self.merge_type == "cat": out = tf.concat(out, axis=self.axis) elif self.merge_type == "sum": out = tf.stack(out, axis=self.axis) out = tf.math.reduce_sum(out, axis=self.axis) else: raise NotImplementedError() return out class SequentialConcurrent(SimpleSequential): """ A sequential container with concatenated outputs. Blocks will be executed in the order they are added. Parameters: ---------- stack : bool, default False Whether to concatenate tensors along a new dimension. cat_input : bool, default True Whether to concatenate input tensor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, stack=False, cat_input=True, data_format="channels_last", **kwargs): super(SequentialConcurrent, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) self.stack = stack self.cat_input = cat_input def call(self, x, training=None): out = [x] if self.cat_input else [] for block in self.children: x = block(x, training=training) out.append(x) if self.stack: out = tf.stack(out, axis=self.axis) else: out = tf.concat(out, axis=self.axis) return out class ParametricConcurrent(SimpleSequential): """ A container for concatenation of layers with parameters. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, data_format="channels_last", **kwargs): super(ParametricConcurrent, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) def call(self, x, **kwargs): out = [] for block in self.children: out.append(block(x, **kwargs)) out = tf.concat(out, axis=self.axis) return out class MultiOutputSequential(SimpleSequential): """ A sequential container with multiple outputs. Layers will be executed in the order they are added. Parameters: ---------- multi_output : bool, default True Whether to return multiple output. dual_output : bool, default False Whether to return dual output. return_last : bool, default True Whether to forcibly return last value. """ def __init__(self, multi_output=True, dual_output=False, return_last=True, **kwargs): super(MultiOutputSequential, self).__init__(**kwargs) self.multi_output = multi_output self.dual_output = dual_output self.return_last = return_last def call(self, x, **kwargs): outs = [] for block in self.children: x = block(x, **kwargs) if hasattr(block, "do_output") and block.do_output: outs.append(x) elif hasattr(block, "do_output2") and block.do_output2: assert (type(x) == tuple) outs.extend(x[1]) x = x[0] if self.multi_output: return [x] + outs if self.return_last else outs elif self.dual_output: return x, outs else: return x class ParallelConcurent(SimpleSequential): """ A sequential container with multiple inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- merge_type : str, default None Type of branch merging. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, merge_type=None, data_format="channels_last", **kwargs): super(ParallelConcurent, self).__init__(**kwargs) assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = get_channel_axis(data_format) self.merge_type = merge_type def call(self, x, training=None): out = [] for block, xi in zip(self.children, x): out.append(block(xi, training=training)) if self.merge_type == "list": pass elif self.merge_type == "stack": out = tf.stack(out, axis=self.axis) elif self.merge_type == "cat": out = tf.concat(out, axis=self.axis) elif self.merge_type == "sum": out = tf.stack(out, axis=self.axis) out = tf.math.reduce_sum(out, axis=self.axis) else: raise NotImplementedError() return out class DualPathParallelConcurent(SimpleSequential): """ A sequential container with multiple dual-path inputs and single/multiple outputs. Modules will be executed in the order they are added. Parameters: ---------- merge_type : str, default 'list' Type of branch merging. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, merge_type="list", data_format="channels_last", **kwargs): super(DualPathParallelConcurent, self).__init__(**kwargs) assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"]) self.axis = get_channel_axis(data_format) self.merge_type = merge_type def call(self, x1, x2, training=None): x1_out = [] x2_out = [] for block, x1i, x2i in zip(self.children, x1, x2): y1i, y2i = block(x1i, x2i, training=training) x1_out.append(y1i) x2_out.append(y2i) if self.merge_type == "list": pass elif self.merge_type == "stack": x1_out = tf.stack(x1_out, axis=self.axis) x2_out = tf.stack(x2_out, axis=self.axis) elif self.merge_type == "cat": x1_out = tf.concat(x1_out, axis=self.axis) x2_out = tf.concat(x2_out, axis=self.axis) elif self.merge_type == "sum": x1_out = tf.math.reduce_sum(tf.stack(x1_out, axis=self.axis), axis=self.axis) x2_out = tf.math.reduce_sum(tf.stack(x2_out, axis=self.axis), axis=self.axis) else: raise NotImplementedError() return x1_out, x2_out class NormActivation(nn.Layer): """ Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, bn_eps=1e-5, activation="relu", data_format="channels_last", **kwargs): super(NormActivation, self).__init__(**kwargs) assert (in_channels is not None) self.bn = BatchNorm( epsilon=bn_eps, data_format=data_format, name="bn") self.activ = get_activation_layer(activation, name="activ") def call(self, x, training=None): x = self.bn(x, training=training) x = self.activ(x) return x class InterpolationBlock(nn.Layer): """ Bilinear interpolation block. 
Parameters: ---------- scale_factor : int, default 1 Multiplier for spatial size. out_size : tuple of 2 int, default None Spatial size of the output tensor for the interpolation operation. up : bool, default True Whether to upsample or downsample. interpolation : str, default 'bilinear' Interpolation mode. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, scale_factor=1, out_size=None, up=True, interpolation="bilinear", data_format="channels_last", **kwargs): super(InterpolationBlock, self).__init__(**kwargs) self.scale_factor = scale_factor self.out_size = out_size self.up = up self.data_format = data_format self.method = tf.image.ResizeMethod.BILINEAR if interpolation == "bilinear" else\ tf.image.ResizeMethod.NEAREST_NEIGHBOR def call(self, x, size=None, training=None): out_size = self.calc_out_size(x) if size is None else size if is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 2, 3, 1]) x = tf.image.resize( images=x, size=out_size, method=self.method) if is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 3, 1, 2]) return x def calc_out_size(self, x): if self.out_size is not None: return self.out_size in_size = get_im_size(x, data_format=self.data_format) if self.up: return tuple(s * self.scale_factor for s in in_size) else: return tuple(s // self.scale_factor for s in in_size) class Hourglass(nn.Layer): """ A hourglass block. Parameters: ---------- down_seq : nn.HybridSequential Down modules as sequential. up_seq : nn.HybridSequential Up modules as sequential. skip_seq : nn.HybridSequential Skip connection modules as sequential. merge_type : str, default 'add' Type of concatenation of up and skip outputs. return_first_skip : bool, default False Whether return the first skip connection output. Used in ResAttNet. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, down_seq, up_seq, skip_seq, merge_type="add", return_first_skip=False, data_format="channels_last", **kwargs): super(Hourglass, self).__init__(**kwargs) self.depth = len(down_seq) assert (merge_type in ["cat", "add"]) assert (len(up_seq) == self.depth) assert (len(skip_seq) in (self.depth, self.depth + 1)) self.merge_type = merge_type self.return_first_skip = return_first_skip self.extra_skip = (len(skip_seq) == self.depth + 1) self.axis = get_channel_axis(data_format) self.down_seq = down_seq self.up_seq = up_seq self.skip_seq = skip_seq def _merge(self, x, y): if y is not None: if self.merge_type == "cat": x = tf.concat([x, y], axis=self.axis) elif self.merge_type == "add": x = x + y return x def call(self, x, training=None): y = None down_outs = [x] for down_module in self.down_seq.children: x = down_module(x, training=training) down_outs.append(x) for i in range(len(down_outs)): if i != 0: y = down_outs[self.depth - i] skip_module = self.skip_seq[self.depth - i] y = skip_module(y, training=training) x = self._merge(x, y) if i != len(down_outs) - 1: if (i == 0) and self.extra_skip: skip_module = self.skip_seq[self.depth] x = skip_module(x, training=training) up_module = self.up_seq[self.depth - 1 - i] x = up_module(x, training=training) if self.return_first_skip: return x, y else: return x class HeatmapMaxDetBlock(nn.Layer): """ Heatmap maximum detector block (for human pose estimation task). Parameters: ---------- tune : bool, default True Whether to tune point positions. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, tune=True, data_format="channels_last", **kwargs): super(HeatmapMaxDetBlock, self).__init__(**kwargs) self.tune = tune self.data_format = data_format def call(self, x, training=None): # if not tf.executing_eagerly(): # channels = x.shape[1] if is_channels_first(self.data_format) else x.shape[3] # return x[:, :channels, :3, 0] vector_dim = 2 x_shape = x.get_shape().as_list() batch = x_shape[0] if is_channels_first(self.data_format): channels = x_shape[1] in_size = x_shape[2:] heatmap_vector = tf.reshape(x, shape=(batch, channels, -1)) else: channels = x_shape[3] in_size = x_shape[1:3] heatmap_vector = tf.reshape(x, shape=(batch, -1, channels)) heatmap_vector = tf.transpose(heatmap_vector, perm=(0, 2, 1)) indices = tf.cast(tf.expand_dims(tf.cast(tf.math.argmax(heatmap_vector, axis=vector_dim), np.int32), axis=vector_dim), np.float32) scores = tf.math.reduce_max(heatmap_vector, axis=vector_dim, keepdims=True) scores_mask = tf.cast(tf.math.greater(scores, 0.0), np.float32) pts_x = (indices % in_size[1]) * scores_mask pts_y = (indices // in_size[1]) * scores_mask pts = tf.concat([pts_x, pts_y, scores], axis=vector_dim) if self.tune: pts = pts.numpy() for b in range(batch): for k in range(channels): hm = x[b, k, :, :] if is_channels_first(self.data_format) else x[b, :, :, k] px = int(pts[b, k, 0]) py = int(pts[b, k, 1]) if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1): pts[b, k, 0] += np.sign(hm[py, px + 1] - hm[py, px - 1]) * 0.25 pts[b, k, 1] += np.sign(hm[py + 1, px] - hm[py - 1, px]) * 0.25 pts = tf.convert_to_tensor(pts) return pts
116,234
32.858142
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/lwopenpose_cmupan.py
""" Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in TensorFlow. Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. """ __all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco', 'LwopDecoderFinalBlock'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block, SimpleSequential, is_channels_first,\ get_channel_axis class LwopResBottleneck(nn.Layer): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. use_bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, use_bias=True, bottleneck_factor=2, squeeze_out=False, data_format="channels_last", **kwargs): super(LwopResBottleneck, self).__init__(**kwargs) mid_channels = out_channels // bottleneck_factor if squeeze_out else in_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, use_bias=use_bias, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class LwopResUnit(nn.Layer): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. use_bias : bool, default True Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. squeeze_out : bool, default False Whether to squeeze the output channels. activate : bool, default False Whether to activate the sum. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides=1, use_bias=True, bottleneck_factor=2, squeeze_out=False, activate=False, data_format="channels_last", **kwargs): super(LwopResUnit, self).__init__(**kwargs) self.activate = activate self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = LwopResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, bottleneck_factor=bottleneck_factor, squeeze_out=squeeze_out, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, activation=None, data_format=data_format, name="identity_conv") if self.activate: self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity if self.activate: x = self.activ(x) return x class LwopEncoderFinalBlock(nn.Layer): """ Lightweight OpenPose 2D/3D specific encoder final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(LwopEncoderFinalBlock, self).__init__(**kwargs) self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=False, data_format=data_format, name="pre_conv") self.body = SimpleSequential(name="body") for i in range(3): self.body.add(dwsconv3x3_block( in_channels=out_channels, out_channels=out_channels, dw_use_bn=False, pw_use_bn=False, dw_activation=(lambda: nn.ELU()), pw_activation=(lambda: nn.ELU()), data_format=data_format, name="block{}".format(i + 1))) self.post_conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=True, use_bn=False, data_format=data_format, name="post_conv") def call(self, x, training=None): x = self.pre_conv(x, training=training) x = x + self.body(x, training=training) x = self.post_conv(x, training=training) return x class LwopRefinementBlock(nn.Layer): """ Lightweight OpenPose 2D/3D specific refinement block for decoder units. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(LwopRefinementBlock, self).__init__(**kwargs) self.pre_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=False, data_format=data_format, name="pre_conv") self.body = SimpleSequential(name="body") self.body.add(conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="block1")) self.body.add(conv3x3_block( in_channels=out_channels, out_channels=out_channels, padding=2, dilation=2, use_bias=True, data_format=data_format, name="block2")) def call(self, x, training=None): x = self.pre_conv(x, training=training) # print("--> x.shape={}".format(x.shape)) y = self.body(x, training=training) # print("==> x.shape={}".format(x.shape)) x = x + y return x class LwopDecoderBend(nn.Layer): """ Lightweight OpenPose 2D/3D specific decoder bend block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, mid_channels, out_channels, data_format="channels_last", **kwargs): super(LwopDecoderBend, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=True, use_bn=False, data_format=data_format, name="conv1") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class LwopDecoderInitBlock(nn.Layer): """ Lightweight OpenPose 2D/3D specific decoder init block. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, keypoints, data_format="channels_last", **kwargs): super(LwopDecoderInitBlock, self).__init__(**kwargs) self.data_format = data_format num_heatmap = keypoints num_paf = 2 * keypoints bend_mid_channels = 512 self.body = SimpleSequential(name="body") for i in range(3): self.body.add(conv3x3_block( in_channels=in_channels, out_channels=in_channels, use_bias=True, use_bn=False, data_format=data_format, name="block{}".format(i + 1))) self.heatmap_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_heatmap, data_format=data_format, name="heatmap_bend") self.paf_bend = LwopDecoderBend( in_channels=in_channels, mid_channels=bend_mid_channels, out_channels=num_paf, data_format=data_format, name="paf_bend") def call(self, x, training=None): y = self.body(x, training=training) heatmap = self.heatmap_bend(y, training=training) paf = self.paf_bend(y, training=training) y = tf.concat((x, heatmap, paf), axis=get_channel_axis(self.data_format)) return y class LwopDecoderUnit(nn.Layer): """ Lightweight OpenPose 2D/3D specific decoder init. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, keypoints, data_format="channels_last", **kwargs): super(LwopDecoderUnit, self).__init__(**kwargs) self.data_format = data_format num_heatmap = keypoints num_paf = 2 * keypoints self.features_channels = in_channels - num_heatmap - num_paf self.body = SimpleSequential(name="body") for i in range(5): self.body.add(LwopRefinementBlock( in_channels=in_channels, out_channels=self.features_channels, data_format=data_format, name="block{}".format(i + 1))) in_channels = self.features_channels self.heatmap_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_heatmap, data_format=data_format, name="heatmap_bend") self.paf_bend = LwopDecoderBend( in_channels=self.features_channels, mid_channels=self.features_channels, out_channels=num_paf, data_format=data_format, name="paf_bend") def call(self, x, training=None): if is_channels_first(self.data_format): features = x[:, :self.features_channels] else: features = x[:, :, :, :self.features_channels] y = self.body(x, training=training) heatmap = self.heatmap_bend(y, training=training) paf = self.paf_bend(y, training=training) y = tf.concat((features, heatmap, paf), axis=get_channel_axis(self.data_format)) return y class LwopDecoderFeaturesBend(nn.Layer): """ Lightweight OpenPose 2D/3D specific decoder 3D features bend. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid_channels, out_channels, data_format="channels_last", **kwargs): super(LwopDecoderFeaturesBend, self).__init__(**kwargs) self.body = SimpleSequential(name="body") for i in range(2): self.body.add(LwopRefinementBlock( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="block{}".format(i + 1))) in_channels = mid_channels self.features_bend = LwopDecoderBend( in_channels=mid_channels, mid_channels=mid_channels, out_channels=out_channels, data_format=data_format, name="features_bend") def call(self, x, training=None): x = self.body(x, training=training) x = self.features_bend(x, training=training) return x class LwopDecoderFinalBlock(nn.Layer): """ Lightweight OpenPose 2D/3D specific decoder final block for calcualation 3D poses. Parameters: ---------- in_channels : int Number of input channels. keypoints : int Number of keypoints. bottleneck_factor : int Bottleneck factor. calc_3d_features : bool Whether to calculate 3D features. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, keypoints, bottleneck_factor, calc_3d_features, data_format="channels_last", **kwargs): super(LwopDecoderFinalBlock, self).__init__(**kwargs) self.data_format = data_format self.num_heatmap_paf = 3 * keypoints self.calc_3d_features = calc_3d_features features_out_channels = self.num_heatmap_paf features_in_channels = in_channels - features_out_channels if self.calc_3d_features: self.body = SimpleSequential(name="body") for i in range(5): self.body.add(LwopResUnit( in_channels=in_channels, out_channels=features_in_channels, bottleneck_factor=bottleneck_factor, data_format=data_format, name="block{}".format(i + 1))) in_channels = features_in_channels self.features_bend = LwopDecoderFeaturesBend( in_channels=features_in_channels, mid_channels=features_in_channels, out_channels=features_out_channels, data_format=data_format, name="features_bend") def call(self, x, training=None): if is_channels_first(self.data_format): heatmap_paf_2d = x[:, -self.num_heatmap_paf:] else: heatmap_paf_2d = x[:, :, :, -self.num_heatmap_paf:] if not self.calc_3d_features: return heatmap_paf_2d x = self.body(x, training=training) x = self.features_bend(x, training=training) y = tf.concat((heatmap_paf_2d, x), axis=get_channel_axis(self.data_format)) return y class LwOpenPose(tf.keras.Model): """ Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- encoder_channels : list of list of int Number of output channels for each encoder unit. encoder_paddings : list of list of int Padding/dilation value for each encoder unit. encoder_init_block_channels : int Number of output channels for the encoder initial unit. encoder_final_block_channels : int Number of output channels for the encoder final unit. refinement_units : int Number of refinement blocks in the decoder. calc_3d_features : bool Whether to calculate 3D features. 
return_heatmap : bool, default True Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 19 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, encoder_channels, encoder_paddings, encoder_init_block_channels, encoder_final_block_channels, refinement_units, calc_3d_features, return_heatmap=True, in_channels=3, in_size=(368, 368), keypoints=19, data_format="channels_last", **kwargs): super(LwOpenPose, self).__init__(**kwargs) assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.data_format = data_format self.return_heatmap = return_heatmap self.calc_3d_features = calc_3d_features num_heatmap_paf = 3 * keypoints self.encoder = SimpleSequential(name="encoder") backbone = SimpleSequential(name="backbone") backbone.add(conv3x3_block( in_channels=in_channels, out_channels=encoder_init_block_channels, strides=2, data_format=data_format, name="init_block")) in_channels = encoder_init_block_channels for i, channels_per_stage in enumerate(encoder_channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 padding = encoder_paddings[i][j] stage.add(dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=strides, padding=padding, dilation=padding, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels backbone.add(stage) self.encoder.add(backbone) self.encoder.add(LwopEncoderFinalBlock( in_channels=in_channels, out_channels=encoder_final_block_channels, data_format=data_format, name="final_block")) in_channels = encoder_final_block_channels self.decoder = SimpleSequential(name="decoder") self.decoder.add(LwopDecoderInitBlock( in_channels=in_channels, keypoints=keypoints, 
data_format=data_format, name="init_block")) in_channels = encoder_final_block_channels + num_heatmap_paf for i in range(refinement_units): self.decoder.add(LwopDecoderUnit( in_channels=in_channels, keypoints=keypoints, data_format=data_format, name="unit{}".format(i + 1))) self.decoder.add(LwopDecoderFinalBlock( in_channels=in_channels, keypoints=keypoints, bottleneck_factor=2, calc_3d_features=calc_3d_features, data_format=data_format, name="final_block")) def call(self, x, training=None): # print("**> x.shape={}".format(x.shape)) x = self.encoder(x, training=training) x = self.decoder(x, training=training) if self.return_heatmap: return x else: return x def get_lwopenpose(calc_3d_features, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Lightweight OpenPose 2D/3D model with specific parameters. Parameters: ---------- calc_3d_features : bool, default False Whether to calculate 3D features. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" encoder_channels = [[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]] encoder_paddings = [[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]] encoder_init_block_channels = 32 encoder_final_block_channels = 128 refinement_units = 1 net = LwOpenPose( encoder_channels=encoder_channels, encoder_paddings=encoder_paddings, encoder_init_block_channels=encoder_init_block_channels, encoder_final_block_channels=encoder_final_block_channels, refinement_units=refinement_units, calc_3d_features=calc_3d_features, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, data_format="channels_last", **kwargs): """ Lightweight OpenPose 2D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_lwopenpose(calc_3d_features=False, keypoints=keypoints, model_name="lwopenpose2d_mobilenet_cmupan_coco", data_format=data_format, **kwargs) def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, data_format="channels_last", **kwargs): """ Lightweight OpenPose 3D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004. Parameters: ---------- keypoints : int, default 19 Number of keypoints. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_lwopenpose(calc_3d_features=True, keypoints=keypoints, model_name="lwopenpose3d_mobilenet_cmupan_coco", data_format=data_format, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K # os.environ["TF_CUDNN_DETERMINISTIC"] = "1" # os.environ["TF_DETERMINISTIC_OPS"] = "1" data_format = "channels_last" # data_format = "channels_first" in_size_ = (368, 368) keypoints = 19 return_heatmap = True pretrained = False models = [ (lwopenpose2d_mobilenet_cmupan_coco, "2d", in_size_), (lwopenpose3d_mobilenet_cmupan_coco, "3d", in_size_), ] for model, model_dim, in_size in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (y.shape[0] == batch) keypoints_ = 3 * keypoints if model_dim == "2d" else 6 * keypoints if is_channels_first(data_format): assert ((y.shape[1] == keypoints_) and (y.shape[2] == x.shape[2] // 8) and (y.shape[3] == x.shape[3] // 8)) else: assert ((y.shape[3] == keypoints_) and (y.shape[1] == x.shape[1] // 8) and (y.shape[2] == x.shape[2] // 8)) 
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != lwopenpose2d_mobilenet_cmupan_coco or weight_count == 4091698) assert (model != lwopenpose3d_mobilenet_cmupan_coco or weight_count == 5085983) if __name__ == "__main__": _test()
26,896
34.344284
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/jasperdr.py
""" Jasper DR (Dense Residual) for ASR, implemented in TensorFlow. Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. """ __all__ = ['jasperdr10x5_en', 'jasperdr10x5_en_nr'] from .jasper import get_jasper from .common import is_channels_first def jasperdr10x5_en(classes=29, **kwargs): """ Jasper DR 10x5 model for English language from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en", **kwargs) def jasperdr10x5_en_nr(classes=29, **kwargs): """ Jasper DR 10x5 model for English language (with presence of noise) from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name="jasperdr10x5_en_nr", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K import tensorflow as tf data_format = "channels_last" # data_format = "channels_first" pretrained = False audio_features = 64 classes = 29 models = [ jasperdr10x5_en, jasperdr10x5_en_nr, ] for model in models: net = model( in_channels=audio_features, pretrained=pretrained, data_format=data_format) batch = 3 seq_len = np.random.randint(60, 150, batch) seq_len_max = seq_len.max() + 2 x = tf.random.normal((batch, audio_features, seq_len_max) if is_channels_first(data_format) else (batch, seq_len_max, audio_features)) x_len = tf.convert_to_tensor(seq_len.astype(np.long)) y, y_len = net(x, x_len) assert (y.shape.as_list()[0] == batch) if is_channels_first(data_format): assert (y.shape.as_list()[1] == classes) assert (y.shape.as_list()[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) else: assert (y.shape.as_list()[1] in [seq_len_max // 2, seq_len_max // 2 + 1]) assert (y.shape.as_list()[2] == classes) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != jasperdr10x5_en or weight_count == 332632349) assert (model != jasperdr10x5_en_nr or weight_count == 332632349) if __name__ == "__main__": _test()
3,268
33.410526
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/deeplabv3.py
""" DeepLabv3 for image segmentation, implemented in TensorFlow. Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. """ __all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc', 'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco', 'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes', 'deeplabv3_resnetd101b_cityscapes'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, is_channels_first, interpolate_im,\ get_im_size from .resnetd import resnetd50b, resnetd101b, resnetd152b class DeepLabv3FinalBlock(nn.Layer): """ DeepLabv3 final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_factor : int, default 4 Bottleneck factor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, bottleneck_factor=4, data_format="channels_last", **kwargs): super(DeepLabv3FinalBlock, self).__init__(**kwargs) assert (in_channels % bottleneck_factor == 0) self.data_format = data_format mid_channels = in_channels // bottleneck_factor self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.dropout = nn.Dropout( rate=0.1, name="dropout") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=True, data_format=data_format, name="conv2") def call(self, x, out_size, training=None): x = self.conv1(x, training=training) x = self.dropout(x, training=training) x = self.conv2(x) x = interpolate_im(x, out_size=out_size, data_format=self.data_format) return x class ASPPAvgBranch(nn.Layer): """ ASPP branch with average pooling. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. upscale_out_size : tuple of 2 int or None Spatial size of output image for the bilinear upsampling operation. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, upscale_out_size, data_format="channels_last", **kwargs): super(ASPPAvgBranch, self).__init__(**kwargs) self.upscale_out_size = upscale_out_size self.data_format = data_format self.pool = nn.AveragePooling2D( pool_size=1, data_format=data_format, name="pool") self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="conv") def call(self, x, training=None): in_size = self.upscale_out_size if self.upscale_out_size is not None else\ get_im_size(x, data_format=self.data_format) x = self.pool(x) x = self.conv(x, training=training) x = interpolate_im(x, out_size=in_size, data_format=self.data_format) return x class AtrousSpatialPyramidPooling(nn.Layer): """ Atrous Spatial Pyramid Pooling (ASPP) module. Parameters: ---------- in_channels : int Number of input channels. upscale_out_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, upscale_out_size, data_format="channels_last", **kwargs): super(AtrousSpatialPyramidPooling, self).__init__(**kwargs) atrous_rates = [12, 24, 36] assert (in_channels % 8 == 0) mid_channels = in_channels // 8 project_in_channels = 5 * mid_channels self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="branch1")) for i, atrous_rate in enumerate(atrous_rates): self.branches.add(conv3x3_block( in_channels=in_channels, out_channels=mid_channels, padding=atrous_rate, dilation=atrous_rate, data_format=data_format, name="branch{}".format(i + 2))) self.branches.add(ASPPAvgBranch( in_channels=in_channels, out_channels=mid_channels, upscale_out_size=upscale_out_size, data_format=data_format, name="branch5")) self.conv = conv1x1_block( in_channels=project_in_channels, out_channels=mid_channels, data_format=data_format, name="conv") self.dropout = nn.Dropout( rate=0.5, name="dropout") def call(self, x, training=None): x = self.branches(x, training=training) x = self.conv(x, training=training) x = self.dropout(x, training=training) return x class DeepLabv3(tf.keras.Model): """ DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. classes : int, default 21 Number of segmentation classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), classes=21, data_format="channels_last", **kwargs): super(DeepLabv3, self).__init__(**kwargs) assert (in_channels > 0) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.aux = aux self.fixed_size = fixed_size self.data_format = data_format self.backbone = backbone pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None self.pool = AtrousSpatialPyramidPooling( in_channels=backbone_out_channels, upscale_out_size=pool_out_size, data_format=data_format, name="pool") pool_out_channels = backbone_out_channels // 8 self.final_block = DeepLabv3FinalBlock( in_channels=pool_out_channels, out_channels=classes, bottleneck_factor=1, data_format=data_format, name="final_block") if self.aux: aux_out_channels = backbone_out_channels // 2 self.aux_block = DeepLabv3FinalBlock( in_channels=aux_out_channels, out_channels=classes, bottleneck_factor=4, data_format=data_format, name="aux_block") def call(self, x, training=None): in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format) x, y = self.backbone(x, training=training) x = self.pool(x, training=training) x = self.final_block(x, in_size, training=training) if self.aux: y = self.aux_block(y, in_size, training=training) return x, y else: return x def get_deeplabv3(backbone, classes, aux=False, model_name=None, data_format="channels_last", pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DeepLabv3 model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. model_name : str or None, default None Model name for loading pretrained model. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ net = DeepLabv3( backbone=backbone, classes=classes, aux=aux, data_format=data_format, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root), by_name=True, skip_mismatch=True) return net def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_voc", data_format=data_format, **kwargs) def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_voc", data_format=data_format, **kwargs) def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_voc", data_format=data_format, **kwargs) def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_coco", data_format=data_format, **kwargs) def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. 
classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_coco", data_format=data_format, **kwargs) def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" backbone = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd152b_coco", data_format=data_format, **kwargs) def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_ade20k", data_format=data_format, **kwargs) def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k", data_format=data_format, **kwargs) def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd50b_cityscapes", data_format=data_format, **kwargs) def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last", **kwargs): """ DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,), data_format=data_format).features del backbone.children[-1] return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_cityscapes", data_format=data_format, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" in_size = (480, 480) aux = False pretrained = False models = [ (deeplabv3_resnetd50b_voc, 21), (deeplabv3_resnetd101b_voc, 21), (deeplabv3_resnetd152b_voc, 21), (deeplabv3_resnetd50b_coco, 21), (deeplabv3_resnetd101b_coco, 21), (deeplabv3_resnetd152b_coco, 21), (deeplabv3_resnetd50b_ade20k, 150), (deeplabv3_resnetd101b_ade20k, 150), (deeplabv3_resnetd50b_cityscapes, 19), (deeplabv3_resnetd101b_cityscapes, 19), ] for model, classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) ys = net(x) y = ys[0] if aux else ys assert (y.shape[0] == x.shape[0]) if is_channels_first(data_format): assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3])) else: assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2])) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) 
print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != deeplabv3_resnetd50b_voc or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_voc or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_voc or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_coco or weight_count == 42127850) assert (model != deeplabv3_resnetd101b_coco or weight_count == 61119978) assert (model != deeplabv3_resnetd152b_coco or weight_count == 76763626) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 42194156) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 61186284) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 42126822) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 61118950) else: assert (model != deeplabv3_resnetd50b_voc or weight_count == 39762645) assert (model != deeplabv3_resnetd101b_voc or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_voc or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_coco or weight_count == 39762645) assert (model != deeplabv3_resnetd101b_coco or weight_count == 58754773) assert (model != deeplabv3_resnetd152b_coco or weight_count == 74398421) assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 39795798) assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 58787926) assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 39762131) assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 58754259) if __name__ == "__main__": _test()
26,559
40.178295
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/fpenet.py
""" FPENet for image segmentation, implemented in TensorFlow. Original paper: 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. """ __all__ = ['FPENet', 'fpenet_cityscapes'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, SEBlock, InterpolationBlock, MultiOutputSequential,\ SimpleSequential, is_channels_first, get_channel_axis class FPEBlock(nn.Layer): """ FPENet block. Parameters: ---------- channels : int Number of input/output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, data_format="channels_last", **kwargs): super(FPEBlock, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) dilations = [1, 2, 4, 8] assert (channels % len(dilations) == 0) mid_channels = channels // len(dilations) self.blocks = SimpleSequential(name="blocks") for i, dilation in enumerate(dilations): self.blocks.add(conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, groups=mid_channels, dilation=dilation, padding=dilation, data_format=data_format, name="block{}".format(i + 1))) def call(self, x, training=None): xs = tf.split(x, num_or_size_splits=len(self.blocks.children), axis=self.axis) ys = [] for bi, xsi in zip(self.blocks.children, xs): if len(ys) == 0: ys.append(bi(xsi, training=training)) else: ys.append(bi(xsi + ys[-1], training=training)) x = tf.concat(ys, axis=self.axis) return x class FPEUnit(nn.Layer): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int Bottleneck factor. use_se : bool Whether to use SE-module. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, bottleneck_factor, use_se, data_format="channels_last", **kwargs): super(FPEUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.use_se = use_se mid1_channels = in_channels * bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, strides=strides, data_format=data_format, name="conv1") self.block = FPEBlock( channels=mid1_channels, data_format=data_format, name="blocks") self.conv2 = conv1x1_block( in_channels=mid1_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv2") if self.use_se: self.se = SEBlock( channels=out_channels, data_format=data_format, name="se") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.conv1(x, training=training) x = self.block(x, training=training) x = self.conv2(x, training=training) if self.use_se: x = self.se(x, training=training) x = x + identity x = self.activ(x) return x class FPEStage(nn.Layer): """ FPENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. use_se : bool Whether to use SE-module. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, layers, use_se, data_format="channels_last", **kwargs): super(FPEStage, self).__init__(**kwargs) self.use_block = (layers > 1) if self.use_block: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, strides=2, bottleneck_factor=4, use_se=use_se, data_format=data_format, name="down") self.blocks = SimpleSequential(name="blocks") for i in range(layers - 1): self.blocks.add(FPEUnit( in_channels=out_channels, out_channels=out_channels, strides=1, bottleneck_factor=1, use_se=use_se, data_format=data_format, name="block{}".format(i + 1))) else: self.down = FPEUnit( in_channels=in_channels, out_channels=out_channels, strides=1, bottleneck_factor=1, use_se=use_se, data_format=data_format, name="down") def call(self, x, training=None): x = self.down(x, training=training) if self.use_block: y = self.blocks(x, training=training) x = x + y return x class MEUBlock(nn.Layer): """ FPENet specific mutual embedding upsample (MEU) block. Parameters: ---------- in_channels_high : int Number of input channels for x_high. in_channels_low : int Number of input channels for x_low. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels_high, in_channels_low, out_channels, data_format="channels_last", **kwargs): super(MEUBlock, self).__init__(**kwargs) self.data_format = data_format self.axis = get_channel_axis(data_format) self.conv_high = conv1x1_block( in_channels=in_channels_high, out_channels=out_channels, activation=None, data_format=data_format, name="conv_high") self.conv_low = conv1x1_block( in_channels=in_channels_low, out_channels=out_channels, activation=None, data_format=data_format, name="conv_low") self.pool = nn.GlobalAveragePooling2D( data_format=data_format, name="pool") self.conv_w_high = conv1x1( in_channels=out_channels, out_channels=out_channels, data_format=data_format, name="conv_w_high") self.conv_w_low = conv1x1( in_channels=1, out_channels=1, data_format=data_format, name="conv_w_low") self.relu = nn.ReLU() self.up = InterpolationBlock( scale_factor=2, data_format=data_format, name="up") def call(self, x_high, x_low, training=None): x_high = self.conv_high(x_high, training=training) x_low = self.conv_low(x_low, training=training) w_high = self.pool(x_high) axis = -1 if is_channels_first(self.data_format) else 1 w_high = tf.expand_dims(tf.expand_dims(w_high, axis=axis), axis=axis) w_high = self.conv_w_high(w_high) w_high = self.relu(w_high) w_high = tf.nn.sigmoid(w_high) w_low = tf.math.reduce_mean(x_low, axis=self.axis, keepdims=True) w_low = self.conv_w_low(w_low) w_low = tf.nn.sigmoid(w_low) x_high = self.up(x_high) x_high = x_high * w_low x_low = x_low * w_high out = x_high + x_low return out class FPENet(tf.keras.Model): """ FPENet model from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. meu_channels : list of int Number of output channels for MEU blocks. 
use_se : bool Whether to use SE-module. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, layers, channels, init_block_channels, meu_channels, use_se, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), classes=19, data_format="channels_last", **kwargs): super(FPENet, self).__init__(**kwargs) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.classes = classes self.fixed_size = fixed_size self.data_format = data_format self.stem = conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, strides=2, data_format=data_format, name="stem") in_channels = init_block_channels self.encoder = MultiOutputSequential( return_last=False, name="encoder") for i, (layers_i, out_channels) in enumerate(zip(layers, channels)): stage = FPEStage( in_channels=in_channels, out_channels=out_channels, layers=layers_i, use_se=use_se, data_format=data_format, name="stage{}".format(i + 1)) stage.do_output = True self.encoder.add(stage) in_channels = out_channels self.meu1 = MEUBlock( in_channels_high=channels[-1], in_channels_low=channels[-2], out_channels=meu_channels[0], data_format=data_format, name="meu1") self.meu2 = MEUBlock( in_channels_high=meu_channels[0], in_channels_low=channels[-3], out_channels=meu_channels[1], data_format=data_format, name="meu2") in_channels = meu_channels[1] self.classifier = conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True, data_format=data_format, name="classifier") self.up = InterpolationBlock( scale_factor=2, 
data_format=data_format, name="up") def call(self, x, training=None): x = self.stem(x, training=training) y = self.encoder(x, training=training) x = self.meu1(y[2], y[1], training=training) x = self.meu2(x, y[0], training=training) x = self.classifier(x) x = self.up(x) return x def get_fpenet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create FPENet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ width = 16 channels = [int(width * (2 ** i)) for i in range(3)] init_block_channels = width layers = [1, 3, 9] meu_channels = [64, 32] use_se = False net = FPENet( layers=layers, channels=channels, init_block_channels=init_block_channels, meu_channels=meu_channels, use_se=use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root), by_name=True, skip_mismatch=True) return net def fpenet_cityscapes(classes=19, **kwargs): """ FPENet model for Cityscapes from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1909.08599. Parameters: ---------- classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fpenet(classes=classes, model_name="fpenet_cityscapes", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False in_size = (1024, 2048) classes = 19 models = [ fpenet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, data_format=data_format) batch = 4 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes, in_size[0], in_size[1]) if is_channels_first(data_format) else tuple(y.shape.as_list()) == (batch, in_size[0], in_size[1], classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fpenet_cityscapes or weight_count == 115125) if __name__ == "__main__": _test()
15,897
31.378819
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/model_store.py
""" Model store which provides pretrained models. """ __all__ = ['get_model_file'] import os import zipfile import logging import hashlib _model_sha1 = {name: (error, checksum, repo_release_tag, ds, scale) for name, error, checksum, repo_release_tag, ds, scale in [ ('alexnet', '1609', '8ae4618efdb64d4b2b74fc4346ff917b3d51e3b2', 'v0.0.481', 'in1k', 0.875), ('alexnetb', '1706', 'df9cb6fd363e35b987a652a7ba706b6118eefff3', 'v0.0.485', 'in1k', 0.875), ('zfnet', '1717', '9500db3008e9ca8bc8f8de8101ec760e5ac8c05a', 'v0.0.422', 'in1k', 0.875), ('zfnetb', '1480', '47533f6a367312c8b2f56202aeae0be366013116', 'v0.0.422', 'in1k', 0.875), ('vgg11', '1017', 'c20556f4179e9311f28baa310702b6ea9265fee8', 'v0.0.422', 'in1k', 0.875), ('vgg13', '0951', '9fa609fcb5cb44caf2737d13c0accc07cdea0c9d', 'v0.0.422', 'in1k', 0.875), ('vgg16', '0834', 'ce78831f5d0640bd2fd619ba7d8d5027e62eb4f2', 'v0.0.422', 'in1k', 0.875), ('vgg19', '0768', 'ec5ac0baa5d49c041af48e67d34d1a89f1a72e7f', 'v0.0.422', 'in1k', 0.875), ('bn_vgg11', '0936', 'ef31b86687e83d413cb9c95c9ead657c3de9f21b', 'v0.0.422', 'in1k', 0.875), ('bn_vgg13', '0887', '2cccc7252ab4798fd9a6c3ce9d0b59717c47e40b', 'v0.0.422', 'in1k', 0.875), ('bn_vgg16', '0759', '1ca9dee8ef41ed84a216636d3c21380988ea1bf8', 'v0.0.422', 'in1k', 0.875), ('bn_vgg19', '0688', '81d25be84932c1c2848cabd4533423e3fd2cdbec', 'v0.0.422', 'in1k', 0.875), ('bn_vgg11b', '0975', 'aeaccfdc4a655d895e280165cf5be856472ca91f', 'v0.0.422', 'in1k', 0.875), ('bn_vgg13b', '0916', '64ddd3e799df62223424441e0acd22d8f6a1bef8', 'v0.0.488', 'in1k', 0.875), ('bn_vgg16b', '0776', '4e07f81c0a59865ca3546898aa02a445a0a00cf2', 'v0.0.489', 'in1k', 0.875), ('bn_vgg19b', '0733', '7a0920e8b4219585cee8cbbf7dcae5ae8a5e3f1f', 'v0.0.490', 'in1k', 0.875), ('bninception', '0865', '4cab3cce0eb1b79b872b189f5b0d9e4bb20f5ff4', 'v0.0.423', 'in1k', 0.875), ('resnet10', '1226', 'f91c99958e6093db8db8b380a3abf4fa011471fd', 'v0.0.569', 'in1k', 0.875), ('resnet12', '1201', 'b8f1c73d3314b646a6ee625f181b48ed1705d140', 
'v0.0.485', 'in1k', 0.875), ('resnet14', '1091', 'b1d49202de5d355161ea8ccb0433af69e79901ed', 'v0.0.491', 'in1k', 0.875), ('resnetbc14b', '1037', '3b92ac6b051a55da27e68b4120793cf94147c253', 'v0.0.481', 'in1k', 0.875), ('resnet16', '0977', '6f72910952156543a0a6bf018c06af77318b0d6d', 'v0.0.493', 'in1k', 0.875), ('resnet18_wd4', '1745', '6e80041645de7ccbe156ce5bc3cbde909cee6b41', 'v0.0.422', 'in1k', 0.875), ('resnet18_wd2', '1283', '85a7caff1b2f8e355a1b8cb559e836d5b0c22d12', 'v0.0.422', 'in1k', 0.875), ('resnet18_w3d4', '1067', 'c1735b7de29016779c95e8e1481e5ded955b2b63', 'v0.0.422', 'in1k', 0.875), ('resnet18', '0870', 'e1d3f22e0387c8df1a5ad09b0efd6fc03916e4b0', 'v0.0.478', 'in1k', 0.875), ('resnet26', '0824', '0ed697161e8ed36b9ba1457e02878d984a088ab1', 'v0.0.489', 'in1k', 0.875), ('resnetbc26b', '0757', 'd70a2cadfb648f4c528704f1b9983f35af94de6f', 'v0.0.422', 'in1k', 0.875), ('resnet34', '0744', '7f7d70e7780e24b4cb60cefc895198cdb2b94665', 'v0.0.422', 'in1k', 0.875), ('resnetbc38b', '0677', '75e405a71f7227de5abb6a3c3c44d807b5963c44', 'v0.0.422', 'in1k', 0.875), ('resnet50', '0604', '728800bf57bd49f79671399fd4fd2b7fe9883f07', 'v0.0.422', 'in1k', 0.875), ('resnet50b', '0614', 'b2a49da61dce6309c75e77226bb047b43247da24', 'v0.0.422', 'in1k', 0.875), ('resnet101', '0518', '64320ac17d95157a805e30e4e9becfff31609292', 'v0.0.499', 'in1k', 0.875), ('resnet101b', '0511', 'e3076227a06b394aebcce6260c4afc665224c987', 'v0.0.422', 'in1k', 0.875), ('resnet152', '0444', '83163f546bc21bdff36ab27911285f4c3e45dcd6', 'v0.0.518', 'in1k', 0.875), ('resnet152b', '0431', 'b41ec90e12d4ceea1fdd2dec2c265969a65354e3', 'v0.0.517', 'in1k', 0.875), ('preresnet10', '1402', '541bf0e17a576b1676069563a1ed0de0fde4090f', 'v0.0.422', 'in1k', 0.875), ('preresnet12', '1320', '349c0df4a835699bdb045bedc3d38a7747cd21d4', 'v0.0.422', 'in1k', 0.875), ('preresnet14', '1224', '194b876203e467fbad2ccd2e03b90a79bfec8dac', 'v0.0.422', 'in1k', 0.875), ('preresnetbc14b', '1152', 'bc4e06ff3df99e7ffa0b2bdafa224796fa46f5a9', 
'v0.0.422', 'in1k', 0.875), ('preresnet16', '1080', 'e00c40ee6d211f553bff0274771e5461150c69f4', 'v0.0.422', 'in1k', 0.875), ('preresnet18_wd4', '1780', '6ac7bc592983ced18c863f203db80bbd30e87a0b', 'v0.0.422', 'in1k', 0.875), ('preresnet18_wd2', '1314', '0c0528c8ae4943aa68ba0298209f2ed418e4f644', 'v0.0.422', 'in1k', 0.875), ('preresnet18_w3d4', '1070', '056b46c6e8ee2c86ebee560efea81dd43bbd5de6', 'v0.0.422', 'in1k', 0.875), ('preresnet18', '0955', '621ead9297b93673ec1c040e091efff9142313b5', 'v0.0.422', 'in1k', 0.875), ('preresnet26', '0837', '1a92a73217b1611c27b0c7082a018328264a65ff', 'v0.0.422', 'in1k', 0.875), ('preresnetbc26b', '0788', '1f737cd6c173ed8e5d9a8a69b35e1cf696ba622e', 'v0.0.422', 'in1k', 0.875), ('preresnet34', '0754', '3cc5ae1481512a8b206fb96ac8b632bcc5ee2db9', 'v0.0.422', 'in1k', 0.875), ('preresnetbc38b', '0636', '3396b49b5d20e7d362f9bd8879c00a21e8d67df1', 'v0.0.422', 'in1k', 0.875), ('preresnet50', '0625', '208605629d347a64b9a354f5ad7f441f736eb418', 'v0.0.422', 'in1k', 0.875), ('preresnet50b', '0634', '711227b1a93dd721dd3e37709456acfde969ba18', 'v0.0.422', 'in1k', 0.875), ('preresnet101', '0536', '2a62fe0a86c8a6a6ef0613dbd7aacfeee8300393', 'v0.0.504', 'in1k', 0.875), ('preresnet101b', '0539', '54d23aff956752be614c2ba66d8bff5477cf0367', 'v0.0.422', 'in1k', 0.875), ('preresnet152', '0446', '60b1d0972b2c70ede4111a48641ebdb9a6bb22aa', 'v0.0.510', 'in1k', 0.875), ('preresnet152b', '0439', '224a0baeb02d642716395f937316e071bd7ec25c', 'v0.0.523', 'in1k', 0.875), ('preresnet200b', '0448', '94152aa3e45bb3c923d3d09199dcde475058bbfe', 'v0.0.529', 'in1k', 0.875), ('preresnet269b', '0505', '035c04e12978735f564638e20d5394c034d44252', 'v0.0.545', 'in1k', 0.875), ('resnext14_16x4d', '1222', 'bff90c1d3dbde7ea4a6972bbacb619e252d344ea', 'v0.0.422', 'in1k', 0.875), ('resnext14_32x2d', '1247', '06aa6709cfb4cf23793eb0eee5d5fce42cfcb9cb', 'v0.0.422', 'in1k', 0.875), ('resnext14_32x4d', '1115', '3acdaec14a6c74284c03bc79ed47e9ecb394e652', 'v0.0.422', 'in1k', 0.875), 
('resnext26_32x2d', '0851', '827791ccefaef07e5837f8fb1dae8733c871c029', 'v0.0.422', 'in1k', 0.875), ('resnext26_32x4d', '0718', '4f05525e34b9aeb82db2339f714b25055d94657b', 'v0.0.422', 'in1k', 0.875), ('resnext50_32x4d', '0547', '7f89b9f7e795af30763596e201971da5b3a3c4e2', 'v0.0.498', 'in1k', 0.875), ('resnext101_32x4d', '0418', '65226e18c3e2d54f8d82a891ffac1e29dea7ba9f', 'v0.0.530', 'in1k', 0.875), ('resnext101_64x4d', '0484', 'f8cf1580943cf3c6d6019f2fcc44f8adb857cb20', 'v0.0.422', 'in1k', 0.875), ('seresnet10', '1171', 'b7907036b3ad0b50d0203ca6c732ae3d617282c1', 'v0.0.486', 'in1k', 0.875), ('seresnet12', '1177', 'a05bd730dbfff6c939ce0f1c277059f247b1b1e4', 'v0.0.544', 'in1k', 0.875), ('seresnet14', '1101', 'c695031bf44e1a3387ac688f4e159b5d35de880f', 'v0.0.545', 'in1k', 0.875), ('seresnet16', '0971', '75ab24870f099f62fafc669cde719535be5e8585', 'v0.0.545', 'in1k', 0.875), ('seresnet18', '0921', '46c847abfdbd82c41a096e385163f21ae29ee200', 'v0.0.422', 'in1k', 0.875), ('seresnet26', '0807', '5178b3b1ea71bb118ffcc5d471f782f4ae6150d4', 'v0.0.422', 'in1k', 0.875), ('seresnetbc26b', '0684', '1460a381603c880f24fb0a42bfb6b79b850e2b28', 'v0.0.422', 'in1k', 0.875), ('seresnetbc38b', '0575', '18fcfcc1fee078382ad957e0f7d139ff596732e7', 'v0.0.422', 'in1k', 0.875), ('seresnet50', '0560', 'f1b84c8de0d25bbd4e92fcaefd9dd5012fa74bc4', 'v0.0.441', 'in1k', 0.875), ('seresnet50b', '0533', '256002c3b489d5b685ee1ab6b62303d7768c5816', 'v0.0.422', 'in1k', 0.875), ('seresnet101', '0440', '8d2f638235dc0f72bec4aff216e4e247f2264751', 'v0.0.533', 'in1k', 0.875), ('seresnet101b', '0464', 'a10be1d25d3112825e7b77277d6c56eb276dc799', 'v0.0.460', 'in1k', 0.875), ('seresnet152', '0429', '3fedbd3340f8610a4b4ae8e41a578a5c8588da8c', 'v0.0.538', 'in1k', 0.875), ('sepreresnet10', '1221', 'f0634079aac13e67afcac504709ff2ab33e08839', 'v0.0.544', 'in1k', 0.875), ('sepreresnet12', '1182', '9d52ac4f6fed4d819e03060e931e3b8365b7776b', 'v0.0.543', 'in1k', 0.875), ('sepreresnet16', '0956', 
'67583059c98b5114166b5101cea3850e5b996ef4', 'v0.0.543', 'in1k', 0.875), ('sepreresnet18', '0882', '9f039d7e462942253844a0d20bfb0ac4c95749ff', 'v0.0.543', 'in1k', 0.875), ('sepreresnet26', '0805', '042a83165dc8861ebf13a19f7ac387f21af23e7b', 'v0.0.543', 'in1k', 0.875), ('sepreresnetbc26b', '0640', 'a72bf8765efb1024bdd33eebe9920fd3e22d0bd6', 'v0.0.422', 'in1k', 0.875), ('sepreresnetbc38b', '0567', '17d10c63f096db1b7bfb59b6c6ffe14b9c669676', 'v0.0.422', 'in1k', 0.875), ('sepreresnet50b', '0531', '0882c0e9add4dad0304443fa8a704ee28c5e1c58', 'v0.0.461', 'in1k', 0.875), ('seresnext50_32x4d', '0434', 'c265c58c0c48103c1714bbb84b84987060991ec1', 'v0.0.505', 'in1k', 0.875), ('seresnext101_32x4d', '0446', 'e2f5ca884c866a7833b8bce2b1240a280e2caf4e', 'v0.0.529', 'in1k', 0.875), ('seresnext101_64x4d', '0407', 'd34b057ec428551d7c8fc666587ecf79d1b8eb62', 'v0.0.561', 'in1k', 0.875), ('senet16', '0805', 'f5f576568d02a572be5276b0b64e71ce4d1c4531', 'v0.0.422', 'in1k', 0.875), ('senet28', '0590', '667d56873564cc22b2f10478d5f3d55cda580c61', 'v0.0.422', 'in1k', 0.875), ('senet154', '0440', '69802af4dcb9036514c8ca80aad01fda7b66f7ee', 'v0.0.522', 'in1k', 0.875), ('resnestabc14', '0635', 'fa9e06db46e7bb0b5515d5d8eeb484a7608c8a05', 'v0.0.493', 'in1k', 0.875), ('resnesta18', '0690', '90c54f4bac84b6cadeb523f72a34e9806cd60744', 'v0.0.489', 'in1k', 0.875), ('resnestabc26', '0470', '05e07501fbac448d74348704105123c8c049cf84', 'v0.0.495', 'in1k', 0.875), ('resnesta50', '0439', '9ef477b8d0abd63be449baf1c2f6a3e29d78c56d', 'v0.0.531', 'in1k', 0.875), ('resnesta101', '0400', 'bb2a90f59e103c73bdfc75fc8b8d898325444069', 'v0.0.465', 'in1k', 0.875), ('resnesta152', '0450', 'b21a24bf84ed63cd43a36f880ddefec74dde13e9', 'v0.0.540', 'in1k', 0.875), ('resnesta200', '0338', '29a8a7453caee12992f9acc7f8a680a90706c4ed', 'v0.0.465', 'in1k', 0.875), ('resnesta269', '0336', '9a33e31b98b4e2473b17b55d0b19fbbdd415a30d', 'v0.0.465', 'in1k', 0.875), ('ibn_resnet50', '0562', '515dd2539c53defca766f20609d83fdd868a67f0', 
'v0.0.495', 'in1k', 0.875), ('ibn_resnet101', '0489', '325dbc1ccc5fa2ac608bf8d146543fd1957c7d93', 'v0.0.552', 'in1k', 0.875), ('ibnb_resnet50', '0580', 'dbf9219a38cfa3a43b2cbb23efe4bb53f5f383be', 'v0.0.552', 'in1k', 0.875), ('ibn_resnext101_32x4d', '0490', '5e262e0dd5f7264a9f8047e070da21099f59e24d', 'v0.0.553', 'in1k', 0.875), ('ibn_densenet121', '0647', '830420b2efb5b701e5498b14c9aa25f4eb62a14e', 'v0.0.493', 'in1k', 0.875), ('ibn_densenet169', '0607', '74a97a40e8f2249d98d2c8559a49ec2a02336973', 'v0.0.500', 'in1k', 0.875), ('airnet50_1x64d_r2', '0528', '065bd85bd79e7b4fe58a284dffc00eec79672554', 'v0.0.522', 'in1k', 0.875), ('airnet50_1x64d_r16', '0550', '9500236cd5cce3d991b6d435a1950d23dab7a285', 'v0.0.519', 'in1k', 0.875), ('airnext50_32x4d_r2', '0514', '51089a6b08a25a0c9224a67320980245a6be30b6', 'v0.0.521', 'in1k', 0.875), ('bam_resnet50', '0537', 'a9720e151405bd9d1bf361fbcb817e431314904f', 'v0.0.499', 'in1k', 0.875), ('cbam_resnet50', '0486', 'f4495a7942b578bfeb4f2a4a221827e37124b094', 'v0.0.537', 'in1k', 0.875), ('scnet50', '0539', 'de94eb1b833b7620b1c9becd3f464d94f1901cf1', 'v0.0.493', 'in1k', 0.875), ('scnet101', '0473', '61bd73aff9e4ef095f9cdb1ff283899e4c8bbc09', 'v0.0.507', 'in1k', 0.875), ('scneta50', '0465', 'c1f8f295fc146d71ecfef113eeed16c2fc827f08', 'v0.0.472', 'in1k', 0.875), ('regnetx002', '1037', '599fc3d48f8a9c6275107ca918bc57b9f73e4de3', 'v0.0.475', 'in1k', 0.875), ('regnetx004', '0852', 'f0707cffa04f93eb436c1f19a9cb85f1baf08eea', 'v0.0.479', 'in1k', 0.875), ('regnetx006', '0759', '2e47a916950df2160c00feaf68db33eee5d28a7e', 'v0.0.482', 'in1k', 0.875), ('regnetx008', '0727', 'b19816adc82b4cf976dfab8ea46b437528b8480a', 'v0.0.482', 'in1k', 0.875), ('regnetx016', '0613', 'c51845ab253ed26ab19875f7482f8fd6ead049a5', 'v0.0.486', 'in1k', 0.875), ('regnetx032', '0568', '5f6287347fb2c294965aeae9a9ba494b9ea605c2', 'v0.0.492', 'in1k', 0.875), ('regnetx040', '0470', 'a3f5478840c94ed1685a6257c9738cae0debc240', 'v0.0.495', 'in1k', 0.875), ('regnetx064', '0457', 
'fbb602bcc1842d26d550d4ec3dd222e05b548638', 'v0.0.535', 'in1k', 0.875), ('regnetx080', '0466', 'c101c5242bbd7cf52958c2f5f81fca09a4d21091', 'v0.0.515', 'in1k', 0.875), ('regnetx120', '0521', '7eb9799acf3949832867ba67e520662b54af66ff', 'v0.0.542', 'in1k', 0.875), ('regnetx160', '0458', '428d1dc3450a91fe261288386c0e07a5b491e33b', 'v0.0.532', 'in1k', 0.875), ('regnetx320', '0395', 'b7354725c1de9736f1d69d11c9f8b47e2d19314a', 'v0.0.548', 'in1k', 0.875), ('regnety002', '0953', '8935adbabb80191d7ca6ea9c498dd4cb5c62742b', 'v0.0.476', 'in1k', 0.875), ('regnety004', '0750', '65a10212ee15308e6bc7eff57a837d1abc139cb5', 'v0.0.481', 'in1k', 0.875), ('regnety006', '0700', 'af7dca34c8cbef4ab1ad85745bb0e1129cf32d7c', 'v0.0.483', 'in1k', 0.875), ('regnety008', '0646', '039229808da2f0f5e7e28c69dc04ba2901ce4736', 'v0.0.483', 'in1k', 0.875), ('regnety016', '0569', '285f4f57b3c79b15536e5f4592f93d51e46f96f4', 'v0.0.486', 'in1k', 0.875), ('regnety032', '0411', '7fde6bb0f2939cd8beb9d63b216bd9eb36bfece6', 'v0.0.473', 'in1k', 0.875), ('regnety040', '0468', '5df9e764d91a0295653eaddb6f8ae4f2da481a7a', 'v0.0.494', 'in1k', 0.875), ('regnety064', '0442', 'd7aaab23d152c3a31170d8a62b51615530ced837', 'v0.0.513', 'in1k', 0.875), ('regnety080', '0439', 'e9b5f16aa6b119e0db33be4e0bd358a3f6a669d3', 'v0.0.516', 'in1k', 0.875), ('regnety120', '0429', '1d6a27ba993b9c0f885fc8ff8569f9fa8dd2488b', 'v0.0.526', 'in1k', 0.875), ('regnety160', '0429', '5a9e11914a6b299ac404a2b4bbccdff32af6b9f1', 'v0.0.527', 'in1k', 0.875), ('regnety320', '0373', 'e5ac157ceace9b464e38def00ea8695a8f514e5b', 'v0.0.550', 'in1k', 0.875), ('pyramidnet101_a360', '0520', 'a0d721603c2b3b63415f5dcee5b9cc40204bb89d', 'v0.0.507', 'in1k', 0.875), ('diracnet18v2', '1113', '4d687b749342d23996d078a0984fd6affe63e47c', 'v0.0.429', 'in1k', 0.875), ('diracnet34v2', '0950', '161d97fda4104be091e918ea24c903bfffdc9b8d', 'v0.0.429', 'in1k', 0.875), ('densenet121', '0684', 'e9196a9c93534ca7b71ef136e5cc27f240370481', 'v0.0.422', 'in1k', 0.875), 
('densenet161', '0591', '78224027b390f943b30130a7921ded2887776a77', 'v0.0.432', 'in1k', 0.875), ('densenet169', '0606', 'f708dc3310008e59814745ffc22ddf829fb2d25a', 'v0.0.422', 'in1k', 0.875), ('densenet201', '0591', '450c656858d693932253b486069690fe727f6f89', 'v0.0.426', 'in1k', 0.875), ('peleenet', '0982', '5f84bad24da6b8aa28c4f9eed534991476d54ba8', 'v0.0.496', 'in1k', 0.875), ('wrn50_2', '0608', '646ef77ea91cc2105cfd55e9587f2a2d29f10e98', 'v0.0.520', 'in1k', 0.875), ('drnc26', '0710', '4797ca29cd710769b8361879e47c14dec3a4718c', 'v0.0.508', 'in1k', 0.875), ('drnc42', '0612', 'afd99699ae54a6ad13d09869fbe97d569725174d', 'v0.0.556', 'in1k', 0.875), ('drnc58', '0517', '2e3cae869e6fa62c9895a94d78484421a2b0c58d', 'v0.0.559', 'in1k', 0.875), ('drnd22', '0747', '99f944258689018459cff2d2e920a6e10ddd3f09', 'v0.0.498', 'in1k', 0.875), ('drnd38', '0623', '63255b31333cf7ed685f01ad18444eeb5662f1a3', 'v0.0.552', 'in1k', 0.875), ('drnd54', '0498', 'b724009abdcf2e19eeb08e5b16203bebe6884b03', 'v0.0.554', 'in1k', 0.875), ('drnd105', '0489', '3b0e701c0a9c8bf70bbd1023a7820332e0c3b57b', 'v0.0.564', 'in1k', 0.875), ('dpn68', '0658', '5b70b7b86c33c3dfb04f5fa189e5d501e8804499', 'v0.0.427', 'in1k', 0.875), ('dpn98', '0426', '5d6dd2ed646c9bfa2c57497fca670f989b4113a5', 'v0.0.540', 'in1k', 0.875), ('dpn131', '0477', '282d2d194252b222fdbec3d97d30f2ecc4b661e9', 'v0.0.534', 'in1k', 0.875), ('darknet_tiny', '1745', 'd30be41aad15edf40dfed0bbf53d0e68c520f9f3', 'v0.0.422', 'in1k', 0.875), ('darknet_ref', '1671', 'b4991f6b58ae95118aa9ea84cae4a27e328196b5', 'v0.0.422', 'in1k', 0.875), ('darknet53', '0554', '118630cc9e4e262609ed9b654d2e089594840041', 'v0.0.501', 'in1k', 0.875), ('bagnet9', '2541', 'f2c2b3c2282d510792dc08b4f8ea212a2de9596d', 'v0.0.553', 'in1k', 0.875), ('bagnet17', '1523', 'f21a35804ecea7bee12f7ab7e7f9dfbe1bef98b1', 'v0.0.558', 'in1k', 0.875), ('bagnet33', '1041', '29b0a0634dec61d81619f63630f3e9bb45e8e13e', 'v0.0.561', 'in1k', 0.875), ('dla34', '0705', 
'ade65c1687c187de7e6c7a8d8c9654a0b836c160', 'v0.0.486', 'in1k', 0.875), ('dla46c', '1287', 'dfcae3b549121205008235fd7e59793b394f8998', 'v0.0.427', 'in1k', 0.875), ('dla46xc', '1229', 'a858beca359f41cfe836cec6d30b01ba98109d06', 'v0.0.427', 'in1k', 0.875), ('dla60', '0553', '61a8f4e7c65ef48b6a6cfc715352a4b6573c4460', 'v0.0.494', 'in1k', 0.875), ('dla60x', '0550', 'b692a2267eb3330e36bf9ce4cf3943c9118ed9ae', 'v0.0.493', 'in1k', 0.875), ('dla60xc', '1075', 'a7850f0307de77fcce42afdbb7070776b7c219ca', 'v0.0.427', 'in1k', 0.875), ('dla102', '0517', '9bebb44b573356606e41cf16c2f1b3298e39106b', 'v0.0.505', 'in1k', 0.875), ('dla102x', '0474', 'dfef2c8122a48cdd24acbc5f41720f30edb9893c', 'v0.0.528', 'in1k', 0.875), ('dla102x2', '0428', 'e05fb468d1af87887e1b5a3b27f4429577497b8f', 'v0.0.542', 'in1k', 0.875), ('dla169', '0462', '24ee0a54909b9c93ed4be47284b9614ea095a7d4', 'v0.0.539', 'in1k', 0.875), ('dicenet_wd5', '3116', '6fbe46e2756bbaab61bd772532135fc1e10138ba', 'v0.0.497', 'in1k', 0.875), ('dicenet_wd2', '2341', '7e405ff3c91a199e13cb11cba0ea003e98f4cb56', 'v0.0.561', 'in1k', 0.875), ('dicenet_w3d4', '1666', '9237aa1696a5e1925e7650e1ace0b78c3a059bed', 'v0.0.567', 'in1k', 0.875), ('dicenet_w1', '1439', '675507ad8fd2d5e932fa75dde3d37aaa00625ef9', 'v0.0.513', 'in1k', 0.875), ('dicenet_w5d4', '1286', '353f46cc9731a046466f6b5ea1df0beb4a4d96f3', 'v0.0.515', 'in1k', 0.875), ('dicenet_w3d2', '1173', '2d5ba3eb216b1fd4399e5c91136d19b66bb11dcb', 'v0.0.522', 'in1k', 0.875), ('dicenet_w7d8', '1131', '008459595b70042884480f4946fee807384e781e', 'v0.0.527', 'in1k', 0.875), ('dicenet_w2', '0955', '6301dc901a2e92f0591eed98d14a1b5d1df6de39', 'v0.0.569', 'in1k', 0.875), ('hrnet_w18_small_v1', '0871', '6ae644af92de5cb12ba6ccae0d25dc2b86204ada', 'v0.0.492', 'in1k', 0.875), ('hrnet_w18_small_v2', '0602', 'e9db4e0ccd82efbebdba9c5b03cd58b9aa99ea51', 'v0.0.499', 'in1k', 0.875), ('hrnetv2_w18', '0504', '5e025eddf2918e1592d2ca39f585b8beae5f2461', 'v0.0.508', 'in1k', 0.875), ('hrnetv2_w30', '0508', 
'f498640226679bee7792f50a6da403cd3836ba44', 'v0.0.525', 'in1k', 0.875), ('hrnetv2_w32', '0495', 'ab96120c9db009f0b7c8bb9310ac67b3113e7502', 'v0.0.528', 'in1k', 0.875), ('hrnetv2_w40', '0480', '783aff21a18a035362527464b47b72277ceb6ed1', 'v0.0.534', 'in1k', 0.875), ('hrnetv2_w44', '0488', '14fcc1a2c49eb808782f808b84a125629a677e29', 'v0.0.541', 'in1k', 0.875), ('hrnetv2_w48', '0481', '7b8bb74f095831ebc2c71f2bf9c765800ed36df5', 'v0.0.541', 'in1k', 0.875), ('hrnetv2_w64', '0479', '0bd946ed4729dc338f6299e403f348bcfda2ea8a', 'v0.0.543', 'in1k', 0.875), ('vovnet27s', '1154', 'f36b2e780f19e2e6638d723a6572c887118feffb', 'v0.0.551', 'in1k', 0.875), ('vovnet39', '0694', 'ae8d6df08237ff093b87ee9864902d2a64ad3fb6', 'v0.0.493', 'in1k', 0.875), ('vovnet57', '0623', '16133ef5c2fb5472cd91ebe5733b5451b9e6e493', 'v0.0.505', 'in1k', 0.875), ('selecsls42b', '0598', '6003cd2dc23f95da22760bd04299505f55cf9504', 'v0.0.493', 'in1k', 0.875), ('selecsls60', '0513', '1f2a07e410649548cca171c91be161c5434d5a38', 'v0.0.496', 'in1k', 0.875), ('selecsls60b', '0538', '5e4cdf6504c2dede81ee416d09d50e07ea0599fc', 'v0.0.495', 'in1k', 0.875), ('hardnet39ds', '0871', '0bd9fa5e72e0a80229534c195f160f5d6e603763', 'v0.0.485', 'in1k', 0.875), ('hardnet68ds', '0741', '371ee29a54f43a89fa2c4803b28fd1bc6d03d25d', 'v0.0.487', 'in1k', 0.875), ('hardnet68', '0704', 'c968512e936a77c8fd40e2743e46fbe74318d944', 'v0.0.557', 'in1k', 0.875), ('hardnet85', '0569', '28a9588e6edfc424e6b2ffc5b3fdbc97fe73f0d1', 'v0.0.495', 'in1k', 0.875), ('squeezenet_v1_0', '1760', 'd13ba73265325f21eb34e782989a7269cad406c6', 'v0.0.422', 'in1k', 0.875), ('squeezenet_v1_1', '1742', '95b614487f1f0572bd0dba18e0fc6d63df3a6bfc', 'v0.0.422', 'in1k', 0.875), ('squeezeresnet_v1_0', '1783', 'db620d998257c84fd6d5e80bba48cc1022febda3', 'v0.0.422', 'in1k', 0.875), ('squeezeresnet_v1_1', '1789', '13d6bc6bd85adf83ef55325443495feb07c5788f', 'v0.0.422', 'in1k', 0.875), ('sqnxt23_w1', '1861', '379975ebe54b180f52349c3737b17ea7b2613953', 'v0.0.422', 'in1k', 0.875), 
('sqnxt23v5_w1', '1762', '153b4ce73714d2ecdca294efb365ab9c026e2f41', 'v0.0.422', 'in1k', 0.875), ('sqnxt23_w3d2', '1334', 'a2ba956cfeed0b4bbfc37776c6a1cd5ca13d9345', 'v0.0.422', 'in1k', 0.875), ('sqnxt23v5_w3d2', '1284', '72efaa710f0f1645cb220cb9950b3660299f2bed', 'v0.0.422', 'in1k', 0.875), ('sqnxt23_w2', '1069', 'f43dee19c527460f9815fc4e5eeeaef99fae4df3', 'v0.0.422', 'in1k', 0.875), ('sqnxt23v5_w2', '1026', 'da80c6407a4c18be31bcdd08356666942a9ef2b4', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g1_wd4', '3681', '04a9e2d4ada22b3d317e2fc8b7d4ec11865c414f', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g3_wd4', '3618', 'c9aad0f08d129726bbc19219c9773b38cf38825e', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g1_wd2', '2236', '082db702c422d8bce12d4d79228de56f088a420d', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g3_wd2', '2059', 'e3aefeeb36c20e325d0c7fe46afc60484167609d', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g1_w3d4', '1679', 'a1cc5da3a288299a33353f697ed0297328dc3e95', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g3_w3d4', '1611', '89546a05f499f0fdf96dade0f3db430f92c5920d', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g1_w1', '1348', '52ddb20fd7ff288ae30a17757efda4653c09d5ca', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g2_w1', '1333', '2a8ba6928e6fac05a5fe8911a9a175268eb18382', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g3_w1', '1326', 'daaec8b84572023c1352e11830d296724123408e', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g4_w1', '1313', '35dbd6b9fb8bc3e97367ea210abbd61da407f226', 'v0.0.422', 'in1k', 0.875), ('shufflenet_g8_w1', '1322', '449fb27659101a2cf0a87c90e33f4632d1c5e9f2', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2_wd2', '1843', 'd492d721d3167cd64ab1c2a1f33f3ca5f6dec7c3', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2_w1', '1135', 'dae13ee9f24c89cd1ea12a58fb90b967223c8e2e', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2_w3d2', '0923', 'ea615baab737fca3a3d90303844b4a2922ea2c62', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2_w2', '0821', '6ccac868f595e4618ca7e5f67f7c113f021ffad4', 'v0.0.422', 'in1k', 
0.875), ('shufflenetv2b_wd2', '1784', 'd5644a6ab8fcb6ff04f30a2eb862ebd2de92b94c', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2b_w1', '1104', 'b7db0ca041e996ee76fec7f126dc39c4e5120e82', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2b_w3d2', '0877', '9efb13f7d795d63c8fbee736622b9f1940dd5dd5', 'v0.0.422', 'in1k', 0.875), ('shufflenetv2b_w2', '0808', 'ba5c7ddcd8f7da3719f5d1de71d5fd30130d59d9', 'v0.0.422', 'in1k', 0.875), ('menet108_8x1_g3', '2039', '1a8cfc9296011cd994eb48e75e24c33ecf6580f5', 'v0.0.422', 'in1k', 0.875), ('menet128_8x1_g4', '1918', '7fb59f0a8d3e1f490c26546dfe93ea29ebd79c2b', 'v0.0.422', 'in1k', 0.875), ('menet160_8x1_g8', '2034', '3cf9eb2aa2d4e067aa49ce32e7a41e9db5262493', 'v0.0.422', 'in1k', 0.875), ('menet228_12x1_g3', '1291', '21bd19bf0adb73b10cb04ccce8688f119467a114', 'v0.0.422', 'in1k', 0.875), ('menet256_12x1_g4', '1217', 'd9f2e10e6402e5ee2aec485da07da72edf25f790', 'v0.0.422', 'in1k', 0.875), ('menet348_12x1_g3', '0937', 'cee7691c710f5c453b63ef9e8c3e15e699b004bb', 'v0.0.422', 'in1k', 0.875), ('menet352_12x1_g8', '1167', '54a916bcc3920c6ef24243c8c73604b25d728a6d', 'v0.0.422', 'in1k', 0.875), ('menet456_24x1_g3', '0779', '2a70b14bd17e8d4692f15f2f8e9d181e7d95b971', 'v0.0.422', 'in1k', 0.875), ('mobilenet_wd4', '2213', 'ad04596aa730e5bb4429115df70504c5a7dd5969', 'v0.0.422', 'in1k', 0.875), ('mobilenet_wd2', '1333', '01395e1b9e2a54065aafcc8b4c419644e7f6a655', 'v0.0.422', 'in1k', 0.875), ('mobilenet_w3d4', '1051', '7832561b956f0d763b002fbd9f2f880bbb712885', 'v0.0.422', 'in1k', 0.875), ('mobilenet_w1', '0866', '6939232b46fb98c8a9209d66368d630bb50941ed', 'v0.0.422', 'in1k', 0.875), ('mobilenetb_wd4', '2169', '4aba9700274c1db221480b28944e639101ca082f', 'v0.0.481', 'in1k', 0.875), ('mobilenetb_wd2', '1269', '4ebf1936c541195bf47644b662f48e4a3dc3a8fd', 'v0.0.480', 'in1k', 0.875), ('mobilenetb_w3d4', '1018', '2c5ff66f431af9317c752cfdc7314768c8aca813', 'v0.0.481', 'in1k', 0.875), ('mobilenetb_w1', '0789', 'fdd5af09ab32014d92466437b11d968793032c93', 'v0.0.489', 'in1k', 
0.875), ('fdmobilenet_wd4', '3062', '36aa16df43b344f42d6318cc840a81702951a033', 'v0.0.422', 'in1k', 0.875), ('fdmobilenet_wd2', '1977', '34541b84660b4e812830620c5d48df7c7a142078', 'v0.0.422', 'in1k', 0.875), ('fdmobilenet_w3d4', '1597', '0123c0313194a3094ec006f757d93f59aad73c2b', 'v0.0.422', 'in1k', 0.875), ('fdmobilenet_w1', '1312', 'fa99fb8d728f66f68464221e049a33cd2b8bfc6a', 'v0.0.422', 'in1k', 0.875), ('mobilenetv2_wd4', '2413', 'c3705f55b0df68919fba7ed79204c5651f6f71b1', 'v0.0.422', 'in1k', 0.875), ('mobilenetv2_wd2', '1446', 'b0c9a98b85b579ba77c17d228ace399809c6ab43', 'v0.0.422', 'in1k', 0.875), ('mobilenetv2_w3d4', '1044', 'e122c73eae885d204bc2ba46fb013a9da5cb282f', 'v0.0.422', 'in1k', 0.875), ('mobilenetv2_w1', '0863', 'b32cede3b68f40f2ed0552dcdf238c70f82e5705', 'v0.0.422', 'in1k', 0.875), ('mobilenetv2b_wd4', '2341', '059d92447db63fb6c5fdbdbcdc5490d7fb3e57b5', 'v0.0.483', 'in1k', 0.875), ('mobilenetv2b_wd2', '1375', '55eb7d491f6a655ebcaa3d63c9ca66c992a1c484', 'v0.0.486', 'in1k', 0.875), ('mobilenetv2b_w3d4', '1066', 'bab6a262187971fe603e5ce3aced0ee8845199e7', 'v0.0.483', 'in1k', 0.875), ('mobilenetv2b_w1', '0891', 'eabc2c721a0133cceb7d13603577af371ee32037', 'v0.0.483', 'in1k', 0.875), ('mobilenetv3_large_w1', '0732', '2aaed9ccc47ffceb76324394bbbed2bf3f4a034b', 'v0.0.491', 'in1k', 0.875), ('igcv3_wd4', '2828', '309359dc5a0cd0439f2be5f629534aa3bdf2b4f9', 'v0.0.422', 'in1k', 0.875), ('igcv3_wd2', '1701', 'b952333ab2024f879d4bb9895331a617f2b957b5', 'v0.0.422', 'in1k', 0.875), ('igcv3_w3d4', '1100', '00294c7b1ab9dddf7ab2cef3e7ec0a627bd67b29', 'v0.0.422', 'in1k', 0.875), ('igcv3_w1', '0899', 'a0cb775dd5bb2c13dce35a21d6fd53a783959702', 'v0.0.422', 'in1k', 0.875), ('mnasnet_b1', '0722', '61d97108ec2b9e33cdfdb68de2da59646e02280c', 'v0.0.493', 'in1k', 0.875), ('mnasnet_a1', '0705', '0ea3bd76dc2b55d38ccd8dbcac77b88153953d1b', 'v0.0.486', 'in1k', 0.875), ('proxylessnas_cpu', '0751', '47e1431680e115462835e73ec21dec8b6e88eb13', 'v0.0.424', 'in1k', 0.875), 
('proxylessnas_gpu', '0726', 'd536cb3e27a47a4a18aa8e230ebe6b4a8f748910', 'v0.0.424', 'in1k', 0.875), ('proxylessnas_mobile', '0783', 'da8cdb80c5bd618258c657ebd8506e1342eaeb0d', 'v0.0.424', 'in1k', 0.875), ('proxylessnas_mobile14', '0653', '478b58cdb6c94007f786ec06a9e71a8dbc14507f', 'v0.0.424', 'in1k', 0.875), ('fbnet_cb', '0765', '1f5ffd7c9b72de05daa1d3a0b4ae298e3d15f235', 'v0.0.486', 'in1k', 0.875), ('xception', '0520', '9b570c498e25b7d92fbbe30e7c0abea6b3c203f6', 'v0.0.544', 'in1k', 0.875), ('inceptionv3', '0536', '7a70b62fef648deadbca8a10974b4ff3faa06092', 'v0.0.552', 'in1k', 0.875), ('inceptionv4', '0506', '62135f80437e8d069ec6656545ccd1df2490d81c', 'v0.0.543', 'in1k', 0.875), ('inceptionresnetv1', '0480', 'e2968df03fbb249e8dc32c48667c92b26ded27fb', 'v0.0.552', 'in1k', 0.875), ('inceptionresnetv2', '0470', '7f761d8133fc70ec7194966e1fff4553eaad9f96', 'v0.0.547', 'in1k', 0.875), ('polynet', '0451', 'e752c86bbde4f5ce07ab6d079673a62a7565acf7', 'v0.0.428', 'in1k', 0.875), ('nasnet_4a1056', '0815', '5b38d08a6d911dfa2d9aef9d835ea1787c60c54e', 'v0.0.495', 'in1k', 0.875), ('nasnet_6a4032', '0427', '1f0d2198bffb71386290b9b4e2058af2610574d8', 'v0.0.428', 'in1k', 0.875), ('pnasnet5large', '0427', '90e804af249c36f5f4435eb58ee0f32debefb320', 'v0.0.428', 'in1k', 0.875), ('spnasnet', '0777', '774167df70df802852c26bd8ccc087aea971f190', 'v0.0.490', 'in1k', 0.875), ('efficientnet_b0', '0725', 'fc13925b2b95f5469aba2bb7b8472fdbabd663c3', 'v0.0.427', 'in1k', 0.875), ('efficientnet_b1', '0630', '82e0c512dc557ccb4eb3fbdabf48106988251d6d', 'v0.0.427', 'in1k', 0.882), ('efficientnet_b0b', '0668', '771272448df362b9637c7edf94292ab2c9676314', 'v0.0.429', 'in1k', 0.875), ('efficientnet_b1b', '0577', 'b294ee16111847f37129ff069f9911f76a2233d4', 'v0.0.429', 'in1k', 0.882), ('efficientnet_b2b', '0530', '55bcdc5d03493a581c3a3778b5ee6c08142718b4', 'v0.0.429', 'in1k', 0.890), ('efficientnet_b3b', '0469', 'b8210e1ac4f331b25b95c4a6d30e4b024d84ceb3', 'v0.0.429', 'in1k', 0.904), ('efficientnet_b4b', 
'0399', '5e35e9c56c3a0f705a44a38087e2084a25ee0a2e', 'v0.0.429', 'in1k', 0.922), ('efficientnet_b5b', '0343', '0ed0c69daa1d75e2da35f49ddea6bcfa0383727f', 'v0.0.429', 'in1k', 0.934), ('efficientnet_b6b', '0312', 'faf631041f84b19668eb207201ec13b2d405e702', 'v0.0.429', 'in1k', 0.942), ('efficientnet_b7b', '0315', '4024912ec1499b559de26b2ee7d7be1c2a3e53cf', 'v0.0.429', 'in1k', 0.949), ('efficientnet_b0c', '0646', '2bd0e2af1d275ab2046002719305bf517137f6df', 'v0.0.433', 'in1k', 0.875), ('efficientnet_b1c', '0582', 'a760b325d867a5aa4093ae69d68e8df04ed7730b', 'v0.0.433', 'in1k', 0.882), ('efficientnet_b2c', '0533', 'ea6ca9cf3c5179ad3927d7c3386c1c18c7183e24', 'v0.0.433', 'in1k', 0.890), ('efficientnet_b3c', '0464', '1c8fced86bc52d3d97fdce3750180d6b694f53c6', 'v0.0.433', 'in1k', 0.904), ('efficientnet_b4c', '0390', 'dc4379eac0dc4144260a270d4eb4ea3835394703', 'v0.0.433', 'in1k', 0.922), ('efficientnet_b5c', '0310', '80258ef75ea1b068b6ccf66420b8dd346c0bcdaa', 'v0.0.433', 'in1k', 0.934), ('efficientnet_b6c', '0286', '285f830add2ce100c6ab035f2a0caf49a33308ad', 'v0.0.433', 'in1k', 0.942), ('efficientnet_b7c', '0276', '1ffad4eca775d49ba48a0aa168a9c81649dab5b1', 'v0.0.433', 'in1k', 0.949), ('efficientnet_b8c', '0270', 'aa691b94070f49e2b7f3a0ac11bc5ddbdb18b1f6', 'v0.0.433', 'in1k', 0.954), ('efficientnet_edge_small_b', '0642', '1c03bb7355c6ab14374520743cc56e1ee22e773b', 'v0.0.434', 'in1k', 0.875), ('efficientnet_edge_medium_b', '0565', '73153b188d8b79cd8cc0ab45991561499df87838', 'v0.0.434', 'in1k', 0.882), ('efficientnet_edge_large_b', '0496', 'd72edce103b4bdac37afeabec281f1aedc9632bc', 'v0.0.434', 'in1k', 0.904), ('mixnet_s', '0707', 'f8ada6d8664edc6cc57838c164ee67d86df7f874', 'v0.0.493', 'in1k', 0.875), ('mixnet_m', '0632', '6c91c9672ac5dae4331847a429b2ac8ba8ef713a', 'v0.0.493', 'in1k', 0.875), ('mixnet_l', '0556', '1b72f9aa0f6e47a13f05bc76ee88d6478fc39e21', 'v0.0.500', 'in1k', 0.875), ('resneta10', '1161', '208ecb259aae2da60a11d7736edfcdcc7d1b0493', 'v0.0.484', 'in1k', 0.875), 
('resnetabc14b', '0960', '96153aced03790d2b54ba62c8dce1da95f4f784a', 'v0.0.477', 'in1k', 0.875), ('resneta18', '0804', 'aa3ba975c91f1c4d1cb7f2bc29dc98a6d7ccf30c', 'v0.0.486', 'in1k', 0.875), ('resneta50b', '0538', '54936268ad9b2ed51c3d9262e4e446a9efd4de78', 'v0.0.492', 'in1k', 0.875), ('resneta101b', '0445', '1e0df949ecb3b65ab545ffaf14be8e550bdd37ab', 'v0.0.532', 'in1k', 0.875), ('resneta152b', '0426', '2e42f2ac3a3e3759866c89f8f9ccf1426f000be0', 'v0.0.524', 'in1k', 0.875), ('resnetd50b', '0549', '1c84294f68b78dc58e07496495be0f8ecd2f14e3', 'v0.0.447', 'in1k', 0.875), ('resnetd101b', '0459', '7cce7f1357a3de297f7000b33f505dc67c38fb96', 'v0.0.447', 'in1k', 0.875), ('resnetd152b', '0468', '4673f64c71cf438eeafc890b5a138e301437bf90', 'v0.0.447', 'in1k', 0.875), ('resnet20_cifar10', '0597', '451230e98c5da3cd24e364b76995cdf5bdd36b73', 'v0.0.438', 'cf', 0.0), ('resnet20_cifar100', '2964', '5fa28f78b6b33f507f6b79a41f7fca07f681e4a5', 'v0.0.438', 'cf', 0.0), ('resnet20_svhn', '0343', '3480eec0f2781350815d07aa57bb821ecadc8b69', 'v0.0.438', 'cf', 0.0), ('resnet56_cifar10', '0452', 'a39ad94af7aad7adf21f41436cb8d86a948c7e90', 'v0.0.438', 'cf', 0.0), ('resnet56_cifar100', '2488', '8e413ab97ce41f96e02888776bc9ec71df49d909', 'v0.0.438', 'cf', 0.0), ('resnet56_svhn', '0275', '5acc55374dab36f2ebe70948393112fad83c4b17', 'v0.0.438', 'cf', 0.0), ('resnet110_cifar10', '0369', 'c625643a3c10909cdfc6c955418f0fca174b8d01', 'v0.0.438', 'cf', 0.0), ('resnet110_cifar100', '2280', 'c248211b354f7058b3066c5fb4ad87b2d0bdb6a0', 'v0.0.438', 'cf', 0.0), ('resnet110_svhn', '0245', 'a07e849f5e3233ef458072a30d8cc04ae84ff054', 'v0.0.438', 'cf', 0.0), ('resnet164bn_cifar10', '0368', 'cf08cca79ac123304add47b3aaba11cb4c46a25b', 'v0.0.438', 'cf', 0.0), ('resnet164bn_cifar100', '2044', '1ba347905fe05d922c9ec5ba876611b6393c6c99', 'v0.0.438', 'cf', 0.0), ('resnet164bn_svhn', '0242', '1bfa8083c38c89c19a4e0b53f714876705624fa7', 'v0.0.438', 'cf', 0.0), ('resnet272bn_cifar10', '0333', 
'c8b0a926aeba2cdd404454bb22a731a3aed5996c', 'v0.0.438', 'cf', 0.0), ('resnet272bn_cifar100', '2007', '5357e0df7431ce2fb41f748fa04454f5a7055d1c', 'v0.0.438', 'cf', 0.0), ('resnet272bn_svhn', '0243', 'e2a8e35588d6375815a9b633f66e019a393553f7', 'v0.0.438', 'cf', 0.0), ('resnet542bn_cifar10', '0343', 'c31829d4c5845f9604e1a0f5aec938f03fcc05c3', 'v0.0.438', 'cf', 0.0), ('resnet542bn_cifar100', '1932', '2db913a6e6e577a366e2ab30030b9e976a388008', 'v0.0.438', 'cf', 0.0), ('resnet542bn_svhn', '0234', '0d6759e722dd536b2ce16ef856b6926fba023c6d', 'v0.0.438', 'cf', 0.0), ('resnet1001_cifar10', '0328', '552ab287f0a8224ae960a4ec0b4aed0f309e6641', 'v0.0.438', 'cf', 0.0), ('resnet1001_cifar100', '1979', '75c8acac55fce2dfc5c3f56cd10dd0467e56ffd2', 'v0.0.438', 'cf', 0.0), ('resnet1001_svhn', '0241', 'c9a01550d011abc9e6bc14df63952715a88a506a', 'v0.0.438', 'cf', 0.0), ('resnet1202_cifar10', '0353', '3559a9431d3ddd3ef1ee24bf2baa1b7184a21108', 'v0.0.438', 'cf', 0.0), ('resnet1202_cifar100', '2156', '28fcf78635c21d23b018d70a812eeae2ae24ad39', 'v0.0.438', 'cf', 0.0), ('preresnet20_cifar10', '0651', 'd3e7771e923032393bb6fa88d62625f3da64d9fe', 'v0.0.439', 'cf', 0.0), ('preresnet20_cifar100', '3022', '447255f8c6ad79dc42a2644438e35bc39fdeed36', 'v0.0.439', 'cf', 0.0), ('preresnet20_svhn', '0322', '6dcae6129ca6839c35a1ae9b3d69c4d41591811d', 'v0.0.439', 'cf', 0.0), ('preresnet56_cifar10', '0449', 'b4bfdaa8eaa4370899d1fb0c3c360158cf3fa3f4', 'v0.0.439', 'cf', 0.0), ('preresnet56_cifar100', '2505', '180fc2081f3c694b0c3db2948cb05e06f1070ee2', 'v0.0.439', 'cf', 0.0), ('preresnet56_svhn', '0280', '6e074c73832de7afcb8e61405b2eb62bc969d35f', 'v0.0.439', 'cf', 0.0), ('preresnet110_cifar10', '0386', '287a4b0cdd424fdf29d862b411f556f3d8f57f98', 'v0.0.439', 'cf', 0.0), ('preresnet110_cifar100', '2267', 'ab677c09518f0b7aae855153fc820811bd530c28', 'v0.0.439', 'cf', 0.0), ('preresnet110_svhn', '0279', '226a0b342145852f4289630f6fd82d2c90f38e01', 'v0.0.439', 'cf', 0.0), ('preresnet164bn_cifar10', '0364', 
'29a459fad0f60028b48f1908970d3947728d76b0', 'v0.0.439', 'cf', 0.0), ('preresnet164bn_cifar100', '2018', 'c764970119e627e5c88fe3c7cb6a7d36cd7f29d0', 'v0.0.439', 'cf', 0.0), ('preresnet164bn_svhn', '0258', '2307c36f351e22d9bf0240fdcf5b5651dce03e57', 'v0.0.439', 'cf', 0.0), ('preresnet272bn_cifar10', '0325', '5bacdc955e8d800e08d6513a6ecd21ce79da6c84', 'v0.0.439', 'cf', 0.0), ('preresnet272bn_cifar100', '1963', '22e0919886949484354b5a18f6c87ab5aa33b61a', 'v0.0.439', 'cf', 0.0), ('preresnet272bn_svhn', '0234', '3451d5fbc8dfecf2da2e624319f0e0068091f358', 'v0.0.439', 'cf', 0.0), ('preresnet542bn_cifar10', '0314', 'd8324d47e327c92f3557db4ba806071041a56f69', 'v0.0.439', 'cf', 0.0), ('preresnet542bn_cifar100', '1871', '703875c6827c83e26e05cd3e516b5a3234d01747', 'v0.0.439', 'cf', 0.0), ('preresnet542bn_svhn', '0236', '5ca0759231c9a045df4ef40a47d8b81e624664f8', 'v0.0.439', 'cf', 0.0), ('preresnet1001_cifar10', '0265', '978844c1315a0a3f6261393bcc954cecb85c199a', 'v0.0.439', 'cf', 0.0), ('preresnet1001_cifar100', '1841', '7481e79c54d9a32d163c740eb53310c6a5f40b01', 'v0.0.439', 'cf', 0.0), ('preresnet1202_cifar10', '0339', 'ab04c456454c933245d91f36942166d45393a8bc', 'v0.0.439', 'cf', 0.0), ('resnext20_1x64d_cifar10', '0433', 'e0ab86674852a3c78f4a600e9e8ca50a06ff0bb9', 'v0.0.440', 'cf', 0.0), ('resnext20_1x64d_cifar100', '2197', '413945af9f271e173bb2085de38d65e98905f304', 'v0.0.440', 'cf', 0.0), ('resnext20_1x64d_svhn', '0298', '105736c8c2cb1bf8a4ac4538ccd7e139501095d6', 'v0.0.440', 'cf', 0.0), ('resnext20_2x32d_cifar10', '0453', '7aa966dd0803c3f731d0f858125baedca245cf86', 'v0.0.440', 'cf', 0.0), ('resnext20_2x32d_cifar100', '2255', 'bf34e56aea7d21fca0b99c14558d6b06aab1f94f', 'v0.0.440', 'cf', 0.0), ('resnext20_2x32d_svhn', '0296', 'b61e1395c12285ca0c765f3ddbfd8a5c4d252536', 'v0.0.440', 'cf', 0.0), ('resnext20_2x64d_cifar10', '0403', '367377ed36b429753d727369cba42db281b40443', 'v0.0.440', 'cf', 0.0), ('resnext20_2x64d_cifar100', '2060', '6eef33bcb44c73dfdfe51036f5d647b5eba286c5', 
'v0.0.440', 'cf', 0.0), ('resnext20_2x64d_svhn', '0283', 'dedfbac24ad3e67c55609b79da689e01ad6ba759', 'v0.0.440', 'cf', 0.0), ('resnext20_4x16d_cifar10', '0470', '333e834da705f54958887ce7a34335b0e71fcfad', 'v0.0.440', 'cf', 0.0), ('resnext20_4x16d_cifar100', '2304', 'fa8d4e06a0455f49da492377be9fe90140795629', 'v0.0.440', 'cf', 0.0), ('resnext20_4x16d_svhn', '0317', 'cab6d9fd851d47f11863075e83dd699cddb21571', 'v0.0.440', 'cf', 0.0), ('resnext20_4x32d_cifar10', '0373', 'e4aa1b0dade046bbfc872f4c84ac5fe3bcbeda11', 'v0.0.440', 'cf', 0.0), ('resnext20_4x32d_cifar100', '2131', 'edabd5da34edfba348b8f1712bbb0dc3ce6c5a82', 'v0.0.440', 'cf', 0.0), ('resnext20_4x32d_svhn', '0298', '82b75cbb31f2ea3497548a19fdf1f5fb0531527c', 'v0.0.440', 'cf', 0.0), ('resnext20_8x8d_cifar10', '0466', '1dbd9f5e45f120c697d128558b4d263f2ac94f0e', 'v0.0.440', 'cf', 0.0), ('resnext20_8x8d_cifar100', '2282', '51922108355f86cb0131826715cef9e81513e399', 'v0.0.440', 'cf', 0.0), ('resnext20_8x8d_svhn', '0318', '6ef55252a46d6106a160d87da107a1293cbce654', 'v0.0.440', 'cf', 0.0), ('resnext20_8x16d_cifar10', '0404', '5329db5f6066a73e085805ab40969af31a43e4f7', 'v0.0.440', 'cf', 0.0), ('resnext20_8x16d_cifar100', '2172', '3665fda790f0164078ffd6403e022a0ba8186c47', 'v0.0.440', 'cf', 0.0), ('resnext20_8x16d_svhn', '0301', 'd1a547e4514e6338934b26c473061b49c669c632', 'v0.0.440', 'cf', 0.0), ('resnext20_16x4d_cifar10', '0404', 'c671993585f1cc878941475e87c266c8a1895ca8', 'v0.0.440', 'cf', 0.0), ('resnext20_16x4d_cifar100', '2282', 'e800aabb6ea23a0555d2ac5a1856d7d289a46bca', 'v0.0.440', 'cf', 0.0), ('resnext20_16x4d_svhn', '0321', '77a670a80e976b173272614cf9416e904f1defde', 'v0.0.440', 'cf', 0.0), ('resnext20_16x8d_cifar10', '0394', 'cf7c675c52499a714fb3391c0240c265d6f1bb01', 'v0.0.440', 'cf', 0.0), ('resnext20_16x8d_cifar100', '2173', '0a33029811f76f93e79b83bf6cb19d74711c2e5b', 'v0.0.440', 'cf', 0.0), ('resnext20_16x8d_svhn', '0293', '4ebac2762e92f1c12b28e3012c171333a63706e1', 'v0.0.440', 'cf', 0.0), 
('resnext20_32x2d_cifar10', '0461', 'b05d34915134060c39ea4f6b9e356b539a1e147b', 'v0.0.440', 'cf', 0.0), ('resnext20_32x2d_cifar100', '2322', '2def8cc21fe9057a63aee6aef2c718720fd90230', 'v0.0.440', 'cf', 0.0), ('resnext20_32x2d_svhn', '0327', '0c099194b551bf0d72a0028a13a94a7ca277473b', 'v0.0.440', 'cf', 0.0), ('resnext20_32x4d_cifar10', '0420', '6011e9e91f901ab98107e451149065524d2acc30', 'v0.0.440', 'cf', 0.0), ('resnext20_32x4d_cifar100', '2213', '9508c15dddd01d0064938023904c6c23ad901da5', 'v0.0.440', 'cf', 0.0), ('resnext20_32x4d_svhn', '0309', 'c8a843e1a0ce40fe2f42e3406e671e9a0df55d82', 'v0.0.440', 'cf', 0.0), ('resnext20_64x1d_cifar10', '0493', 'a13300cea5f2c626c096ac1fbf9f707a6da46f0b', 'v0.0.440', 'cf', 0.0), ('resnext20_64x1d_cifar100', '2353', '91695baa3caba28fa7507b3ffa0629048e01aa6e', 'v0.0.440', 'cf', 0.0), ('resnext20_64x1d_svhn', '0342', 'a3bad459c16926727190d1875ae90e709d50145e', 'v0.0.440', 'cf', 0.0), ('resnext20_64x2d_cifar10', '0438', '3846d7a7ecea5fe4da1d0895da05b675b84e23d7', 'v0.0.440', 'cf', 0.0), ('resnext20_64x2d_cifar100', '2235', 'e4a559ccaba13da694828aca7f83bafc9e364dcd', 'v0.0.440', 'cf', 0.0), ('resnext20_64x2d_svhn', '0314', 'c755e25d61534ec355c2da1a458dc5772d1f790e', 'v0.0.440', 'cf', 0.0), ('resnext29_16x64d_cifar10', '0241', '712e474493fd9f504010ca0a8eb10a94431bffdb', 'v0.0.440', 'cf', 0.0), ('resnext29_16x64d_cifar100', '1693', '2df09272ed462101da32619e652074f8c1f3ec23', 'v0.0.440', 'cf', 0.0), ('resnext29_16x64d_svhn', '0268', 'c929fadabc9bd8c2b2e97d4e2703ec2fba31032b', 'v0.0.440', 'cf', 0.0), ('resnext29_32x4d_cifar10', '0315', '5ed2e0f0945e138c3aa0c9acc0c5fd08f2d840cd', 'v0.0.440', 'cf', 0.0), ('resnext29_32x4d_cifar100', '1950', 'e99791392f0930372efefbe0a54304230ac4cc90', 'v0.0.440', 'cf', 0.0), ('resnext29_32x4d_svhn', '0280', 'de6cba99c40a882e98d2ef002cc14d799f5bf8bc', 'v0.0.440', 'cf', 0.0), ('resnext56_1x64d_cifar10', '0287', '5da5fe18fdf2b55977266631e2eb4b7913e7d591', 'v0.0.440', 'cf', 0.0), ('resnext56_1x64d_cifar100', 
'1825', '727009516efca454a34a3e310608b45d4c9a4020', 'v0.0.440', 'cf', 0.0), ('resnext56_1x64d_svhn', '0242', 'dd7ac31ee1f1a0ffcd3049fc056e8e705cae93f0', 'v0.0.440', 'cf', 0.0), ('resnext56_2x32d_cifar10', '0301', '54d6f2df3a903cb23978cd674495ab1e8894ab09', 'v0.0.440', 'cf', 0.0), ('resnext56_2x32d_cifar100', '1786', '6639c30dd1bc152736c21c9de27823d0ce3b367c', 'v0.0.440', 'cf', 0.0), ('resnext56_2x32d_svhn', '0246', '61524d8aff0534121257ec5b8b65647cbdafda7f', 'v0.0.440', 'cf', 0.0), ('resnext56_4x16d_cifar10', '0311', '766ab89fccd5b2675d5d42a9372346fd7bf45b77', 'v0.0.440', 'cf', 0.0), ('resnext56_4x16d_cifar100', '1809', '61b41c3b953a4a7198dec6a379f789030a998e42', 'v0.0.440', 'cf', 0.0), ('resnext56_4x16d_svhn', '0244', 'b7ab24694a0c1f635fbb2b2e4130272b5e75b6bc', 'v0.0.440', 'cf', 0.0), ('resnext56_8x8d_cifar10', '0307', '685eab396974992f71402533be96229cdc3eb751', 'v0.0.440', 'cf', 0.0), ('resnext56_8x8d_cifar100', '1806', 'f3f80382faa7baadaef4e09fedb924b4d5deac78', 'v0.0.440', 'cf', 0.0), ('resnext56_8x8d_svhn', '0247', '85692d770f3bab690dc9aa57b4e3d9aa728121e9', 'v0.0.440', 'cf', 0.0), ('resnext56_16x4d_cifar10', '0312', '930e5d5baf62d2fe4e48afe7dbd928079fd5531a', 'v0.0.440', 'cf', 0.0), ('resnext56_16x4d_cifar100', '1824', '667ba1835c3db07e54ad4dfbc6ea99a0b12afd78', 'v0.0.440', 'cf', 0.0), ('resnext56_16x4d_svhn', '0256', '86f327a9652e79a4a38c0d6ebc9fda8f0a6c3ea4', 'v0.0.440', 'cf', 0.0), ('resnext56_32x2d_cifar10', '0314', '9e387e2e6c769802fbf7a911b67d2c490e14db85', 'v0.0.440', 'cf', 0.0), ('resnext56_32x2d_cifar100', '1860', '7a236896b7f00913f8a0846d39382d87bc56214c', 'v0.0.440', 'cf', 0.0), ('resnext56_32x2d_svhn', '0253', 'b93a0535890a340774a190fab2a521696b134600', 'v0.0.440', 'cf', 0.0), ('resnext56_64x1d_cifar10', '0341', 'bc7469474a3cf31622186aa86c0c837b9c05563a', 'v0.0.440', 'cf', 0.0), ('resnext56_64x1d_cifar100', '1816', '06c6c7a0bb97cd67360e624dd9ca3193969c3e06', 'v0.0.440', 'cf', 0.0), ('resnext56_64x1d_svhn', '0255', 
'9e9e3cc2bf26b8c691b5b2b12fb3908dd999f870', 'v0.0.440', 'cf', 0.0), ('resnext272_1x64d_cifar10', '0255', '6efe448a89da1340dca7158d12a0355d1b2d2d75', 'v0.0.440', 'cf', 0.0), ('resnext272_1x64d_cifar100', '1911', 'e9275c944ff841c29316a2728068a6162af39488', 'v0.0.440', 'cf', 0.0), ('resnext272_1x64d_svhn', '0234', '4d348e9ec9d261318d1264c61f4817de612797e4', 'v0.0.440', 'cf', 0.0), ('resnext272_2x32d_cifar10', '0274', '4e35f99476d34225bd07ed2f4274ed021fb635f3', 'v0.0.440', 'cf', 0.0), ('resnext272_2x32d_cifar100', '1834', '274ef60797974e3d7290644861facefa983bc7f2', 'v0.0.440', 'cf', 0.0), ('resnext272_2x32d_svhn', '0244', 'f792396540a630a0d51932f9c7557e5d96ddb66c', 'v0.0.440', 'cf', 0.0), ('seresnet20_cifar10', '0601', '2f392e4a48cffe1ff96b92ca28fd0f020e9d89aa', 'v0.0.442', 'cf', 0.0), ('seresnet20_cifar100', '2854', '598b585838afb8907e76c6e9af2b92417f5eeb08', 'v0.0.442', 'cf', 0.0), ('seresnet20_svhn', '0323', 'ef43ce80cc226dff6d7c0fd120daaa89fe353392', 'v0.0.442', 'cf', 0.0), ('seresnet56_cifar10', '0413', '0224e930258e0567cf18bd1b0f5ae8ffd85d6231', 'v0.0.442', 'cf', 0.0), ('seresnet56_cifar100', '2294', '9c86ec999dac74831ab3918682c1753fde447187', 'v0.0.442', 'cf', 0.0), ('seresnet56_svhn', '0264', 'a8fcc570f6ab95d188148f0070f714c052bcf0f3', 'v0.0.442', 'cf', 0.0), ('seresnet110_cifar10', '0363', '4c28f93f8fe23a216aba5cb80af8412023b42cdb', 'v0.0.442', 'cf', 0.0), ('seresnet110_cifar100', '2086', '6435b022d058e62f95bbd2bb6447cd76f0a14316', 'v0.0.442', 'cf', 0.0), ('seresnet110_svhn', '0235', '57751ac70c94c9bbe95a1229af30b5471db498b1', 'v0.0.442', 'cf', 0.0), ('seresnet164bn_cifar10', '0339', '64d051543b02cb26fb6a22220ad35bb5b80243e3', 'v0.0.442', 'cf', 0.0), ('seresnet164bn_cifar100', '1995', '121a777aa64b7249a9483baa1e8a677a7c9587df', 'v0.0.442', 'cf', 0.0), ('seresnet164bn_svhn', '0245', 'a19e2e88575459f35303a058e486a944e34f8379', 'v0.0.442', 'cf', 0.0), ('seresnet272bn_cifar10', '0339', 'baa561b6c4449558a11900ae24780d6fcdd9efdf', 'v0.0.442', 'cf', 0.0), 
('seresnet272bn_cifar100', '1907', 'a29e50de59aac03cff1d657ce0653a02246c39dc', 'v0.0.442', 'cf', 0.0), ('seresnet272bn_svhn', '0238', '918ee0dea7a956bca36d23459e822488e3a0659e', 'v0.0.442', 'cf', 0.0), ('seresnet542bn_cifar10', '0347', 'e95ebdb9b79f4955731147c078e1607dd174ffe9', 'v0.0.442', 'cf', 0.0), ('seresnet542bn_cifar100', '1887', 'ddc4d5c89d56a0c560e5174194db071fcb960d81', 'v0.0.442', 'cf', 0.0), ('seresnet542bn_svhn', '0226', '5ec784aabe3030f519ca22821b7a58a30e0bf179', 'v0.0.442', 'cf', 0.0), ('sepreresnet20_cifar10', '0618', '22217b323af922b720bc044bce9556b0dde18d97', 'v0.0.443', 'cf', 0.0), ('sepreresnet20_cifar100', '2831', 'e8dab8b87dbe512dfabd7cdbaff9b08be81fb36b', 'v0.0.443', 'cf', 0.0), ('sepreresnet20_svhn', '0324', 'e7dbcc9678dfa8ce0b2699de601699d29a5cb868', 'v0.0.443', 'cf', 0.0), ('sepreresnet56_cifar10', '0451', '32637db56c6fed2a3d66778ee3335527f2d8e25d', 'v0.0.443', 'cf', 0.0), ('sepreresnet56_cifar100', '2305', 'aea4d90bc7fd0eb8f433e376d1aba8e3c0d1ac55', 'v0.0.443', 'cf', 0.0), ('sepreresnet56_svhn', '0271', 'ea024196ca9bd0ff331e8d8d3da376aecf9ea0c1', 'v0.0.443', 'cf', 0.0), ('sepreresnet110_cifar10', '0454', 'e317c56922fbf1cec478e46e49d6edd3c4ae3b03', 'v0.0.443', 'cf', 0.0), ('sepreresnet110_cifar100', '2261', '19a8d4a1563f8fb61c63a5c577f40f3363efec00', 'v0.0.443', 'cf', 0.0), ('sepreresnet110_svhn', '0259', '6291c548277580f90ed0e22845f06eb7b022f8f9', 'v0.0.443', 'cf', 0.0), ('sepreresnet164bn_cifar10', '0373', '253c0430d6e8d2ba9c4c5526beed3b2e90573fe4', 'v0.0.443', 'cf', 0.0), ('sepreresnet164bn_cifar100', '2005', '9c3ed25062e52a23f73600c1a0f99064f89b4a47', 'v0.0.443', 'cf', 0.0), ('sepreresnet164bn_svhn', '0256', 'c89523226a8a010459ebec9c48d940773946e7bf', 'v0.0.443', 'cf', 0.0), ('sepreresnet272bn_cifar10', '0339', '1ca0bed3b3ae20d55322fa2f75057edb744fb63d', 'v0.0.443', 'cf', 0.0), ('sepreresnet272bn_cifar100', '1913', 'eb75217f625dbc97af737e5878a9eab28fdf3b03', 'v0.0.443', 'cf', 0.0), ('sepreresnet272bn_svhn', '0249', 
'0a778e9d68f6921463563ef84054969221809aef', 'v0.0.443', 'cf', 0.0), ('sepreresnet542bn_cifar10', '0309', '7764e8bddba21c75b8f8d4775093721d859f850c', 'v0.0.443', 'cf', 0.0), ('sepreresnet542bn_cifar100', '1945', '969d2bf0a8d213757486e18c180ba14058e08eac', 'v0.0.443', 'cf', 0.0), ('sepreresnet542bn_svhn', '0247', '8e2427367762cf20b67b407e2a1ec8479b0ad41c', 'v0.0.443', 'cf', 0.0), ('pyramidnet110_a48_cifar10', '0372', '3b6ab16073fb0ff438d4376d320be9b119aee362', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a48_cifar100', '2095', '3490690ae62adc4b91dc29ba06f9dc2abf272fce', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a48_svhn', '0247', '1582739049630e1665b577781ccca1e65f961749', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a84_cifar10', '0298', 'bf303f3414123bdf79cb23d3316dd171df74f5d4', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a84_cifar100', '1887', '85789d68d11ad663a53ed921ce6fb28a98248874', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a84_svhn', '0243', 'aacb5f882c7810181c0d4de061c2a76dfbf4925b', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a270_cifar10', '0251', '983d99830e7bb23ca0123ec47dfa05143eb8a37e', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a270_cifar100', '1710', 'cc58021f2406c3593a51f62d03fea714d0649036', 'v0.0.444', 'cf', 0.0), ('pyramidnet110_a270_svhn', '0238', 'b8742320795657a0b51d35226c2e14fc76acac11', 'v0.0.444', 'cf', 0.0), ('pyramidnet164_a270_bn_cifar10', '0242', 'aa879193cd4730fd06430b494c11497121fad2df', 'v0.0.444', 'cf', 0.0), ('pyramidnet164_a270_bn_cifar100', '1670', '25ddf056b681987c1db76b60a08a1e1a7830a51e', 'v0.0.444', 'cf', 0.0), ('pyramidnet164_a270_bn_svhn', '0234', '94bb4029e52688f7616d5fd680acacf7c6e3cd4e', 'v0.0.444', 'cf', 0.0), ('pyramidnet200_a240_bn_cifar10', '0244', 'c269bf7d485a13a9beed9c0aade75ff959584ef9', 'v0.0.444', 'cf', 0.0), ('pyramidnet200_a240_bn_cifar100', '1609', 'd2b1682287b6047477c3efd322f305957bb393ef', 'v0.0.444', 'cf', 0.0), ('pyramidnet200_a240_bn_svhn', '0232', '77f2380c1fd77abb80b830e0d44f2986fde28ec9', 'v0.0.444', 'cf', 0.0), 
('pyramidnet236_a220_bn_cifar10', '0247', '26aac5d0938a96902484f0a51f7f3440551c9c96', 'v0.0.444', 'cf', 0.0), ('pyramidnet236_a220_bn_cifar100', '1634', '37d5b197d45c3985ad3a9ba346f148e63cd271fb', 'v0.0.444', 'cf', 0.0), ('pyramidnet236_a220_bn_svhn', '0235', '6a9a8b0a5fbcce177c8b4449ad138b6f3a94f2bb', 'v0.0.444', 'cf', 0.0), ('pyramidnet272_a200_bn_cifar10', '0239', 'b57f64f1964798fac3d62fd796c87df8132cf18c', 'v0.0.444', 'cf', 0.0), ('pyramidnet272_a200_bn_cifar100', '1619', '5c233384141f7700da643c53f4245d2f0d00ded7', 'v0.0.444', 'cf', 0.0), ('pyramidnet272_a200_bn_svhn', '0240', '0a389e2f1811af7cacc2a27b6df748a7c46d951a', 'v0.0.444', 'cf', 0.0), ('densenet40_k12_cifar10', '0561', 'e6e20ebfcc60330050d4c1eb94d03d8fadb738df', 'v0.0.445', 'cf', 0.0), ('densenet40_k12_cifar100', '2490', 'ef38ff655136f7921e785836c659be7f1d11424d', 'v0.0.445', 'cf', 0.0), ('densenet40_k12_svhn', '0305', '7d5860ae4c8f912a4374e6214720d13ad52f3ffb', 'v0.0.445', 'cf', 0.0), ('densenet40_k12_bc_cifar10', '0643', '58950791713ee0ec19f6e1bc6e6e3731fc4a9484', 'v0.0.445', 'cf', 0.0), ('densenet40_k12_bc_cifar100', '2841', 'c7fbb0f4e74cafbd0e329597e63fbc81682c8e90', 'v0.0.445', 'cf', 0.0), ('densenet40_k12_bc_svhn', '0320', '77fd3ddf577ba336f7eac64f0ac6afaabbb25fd1', 'v0.0.445', 'cf', 0.0), ('densenet40_k24_bc_cifar10', '0452', '61a7fe9c0654161991da1e4eb1e0286d451d8cec', 'v0.0.445', 'cf', 0.0), ('densenet40_k24_bc_cifar100', '2267', 'b3878e8252d7ae1c53b6d2b5c6f77a857c281e9b', 'v0.0.445', 'cf', 0.0), ('densenet40_k24_bc_svhn', '0290', 'b8a231f7cd23b122bb8d9afe362c6de2663c1241', 'v0.0.445', 'cf', 0.0), ('densenet40_k36_bc_cifar10', '0404', 'ce27624f5701f020d2feff0e88e69da07b0ef958', 'v0.0.445', 'cf', 0.0), ('densenet40_k36_bc_cifar100', '2050', '045ae83a5ee3d1a85864cadadeb537242138c2d8', 'v0.0.445', 'cf', 0.0), ('densenet40_k36_bc_svhn', '0260', 'a176dcf180f086d88bbf4ff028b084bf02394a35', 'v0.0.445', 'cf', 0.0), ('densenet100_k12_cifar10', '0366', 'fc483c0bdd58e5013a3910f939334d5f40c65438', 
'v0.0.445', 'cf', 0.0), ('densenet100_k12_cifar100', '1965', '4f0083d6698d42165c8b326c1e4beda6d9679796', 'v0.0.445', 'cf', 0.0), ('densenet100_k12_svhn', '0260', 'e810c38067bf34dc679caaeb4021623f2277b6b8', 'v0.0.445', 'cf', 0.0), ('densenet100_k24_cifar10', '0313', '7f9ee9b3787c2540c4448f424c504f0509000234', 'v0.0.445', 'cf', 0.0), ('densenet100_k24_cifar100', '1808', 'b0842c59c00f14df58d0f8bbac8348837e30e751', 'v0.0.445', 'cf', 0.0), ('densenet100_k12_bc_cifar10', '0416', '66beb8fc89f7e40d2b529e0f3270549324b5b784', 'v0.0.445', 'cf', 0.0), ('densenet100_k12_bc_cifar100', '2119', 'c1b857d51eb582eee8dbd7250d05871e40a7f4c4', 'v0.0.445', 'cf', 0.0), ('densenet190_k40_bc_cifar10', '0252', '9cc5cfcbef9425227370ac8c6404cfc1e3edbf55', 'v0.0.445', 'cf', 0.0), ('densenet250_k24_bc_cifar10', '0267', '3217a1b3c61afc9d08bc4b43bff4aac103da0012', 'v0.0.445', 'cf', 0.0), ('densenet250_k24_bc_cifar100', '1739', '02d967b564c48b25117aac6cd7b095fd5d30d4d5', 'v0.0.445', 'cf', 0.0), ('resnet10_cub', '2758', '1a6846b3854d1942997d7082e94b330ddce3db19', 'v0.0.446', 'cub', 0.0), ('resnet12_cub', '2668', '03c8073655ae51f21ceed7d7f86f9ed6169fc310', 'v0.0.446', 'cub', 0.0), ('resnet14_cub', '2435', '24b0bfebaa0d1b4442fa63a659d22de8ff594118', 'v0.0.446', 'cub', 0.0), ('resnet16_cub', '2328', '81cc8192c880c687175d636a0339e16463c61627', 'v0.0.446', 'cub', 0.0), ('resnet18_cub', '2335', '198bdc26bbfaad777ea6d494c41b9d66a493aac7', 'v0.0.446', 'cub', 0.0), ('resnet26_cub', '2264', '545967849063af9b5ec55a5cf339f5897f394e85', 'v0.0.446', 'cub', 0.0), ('seresnet10_cub', '2749', '484fc1661dda247db32dd6a54b88dc156da5156c', 'v0.0.446', 'cub', 0.0), ('seresnet12_cub', '2611', '0e5b4e23f30add924f8cad41704cb335a36b2049', 'v0.0.446', 'cub', 0.0), ('seresnet14_cub', '2375', '56c268728f7343aa1410cb2f046860c34428b123', 'v0.0.446', 'cub', 0.0), ('seresnet16_cub', '2321', 'ed3ead791be4af44aa1202f0dbf4b26fdb770963', 'v0.0.446', 'cub', 0.0), ('seresnet18_cub', '2309', 'f699f05f2a2ce41dae01d5d6c180ec2569356f0a', 
'v0.0.446', 'cub', 0.0), ('seresnet26_cub', '2258', 'c02ba47493bc9185a7fb06584e23b5a740082e77', 'v0.0.446', 'cub', 0.0), ('mobilenet_w1_cub', '2346', 'b8f24c14b9ed9629efb161510547e30c4a37edc2', 'v0.0.446', 'cub', 0.0), ('proxylessnas_mobile_cub', '2202', '73ceed5a6a3f870b306da0c48318d969e53d6340', 'v0.0.446', 'cub', 0.0), ('pspnet_resnetd101b_voc', '7599', 'fbe47bfce77b8c9cab3c9c5913f6a42c04cce946', 'v0.0.448', 'voc', 0.0), ('pspnet_resnetd50b_ade20k', '2712', 'f4fadf0b3f5a39e1ab070736d792bd9259c0d371', 'v0.0.450', 'voc', 0.0), ('pspnet_resnetd101b_ade20k', '3259', 'ac8569f44bd646ee8875d2b3eae0ab54c72c4904', 'v0.0.450', 'voc', 0.0), ('pspnet_resnetd101b_coco', '5438', 'b64ff2dcde6d3f989c45cec2a021d3769f4cb9eb', 'v0.0.451', 'voc', 0.0), ('pspnet_resnetd101b_cityscapes', '5760', '6dc20af68e9de31b663469b170e75cb016bd3a1f', 'v0.0.449', 'cs', 0.0), ('deeplabv3_resnetd101b_voc', '7560', 'e261b6fd9c4878c41bfa088777ea53fcddb4fa51', 'v0.0.448', 'voc', 0.0), ('deeplabv3_resnetd152b_voc', '7791', '72038caba5f552c77d08ad768bda004643f1c53e', 'v0.0.448', 'voc', 0.0), ('deeplabv3_resnetd50b_ade20k', '3172', '2ba069a73d81d6b2ceaf7f2c57f2fe3dd673b78b', 'v0.0.450', 'voc', 0.0), ('deeplabv3_resnetd101b_ade20k', '3488', '08c90933a65061a56e3b22e9c143340a98455075', 'v0.0.450', 'voc', 0.0), ('deeplabv3_resnetd101b_coco', '5865', '39525a1333ebf12ca32578f32831b3e5b22a887a', 'v0.0.451', 'voc', 0.0), ('deeplabv3_resnetd152b_coco', '6067', 'f4dabc62dc8209e7a9adf0dceef97837b06b21c9', 'v0.0.451', 'voc', 0.0), ('fcn8sd_resnetd101b_voc', '8039', 'e140349ce60ad3943b535efb081b3e9c2a58f6e9', 'v0.0.448', 'voc', 0.0), ('fcn8sd_resnetd50b_ade20k', '3310', 'd440f859bad1c84790aa1c3e1c0addc21b171d4a', 'v0.0.450', 'voc', 0.0), ('fcn8sd_resnetd101b_ade20k', '3550', '970d968a1fb44670993b065c1603a6a7c0bd57a1', 'v0.0.450', 'voc', 0.0), ('fcn8sd_resnetd101b_coco', '5968', '69c001b3875c5399dfc1281eb5a051bafef40e4b', 'v0.0.451', 'voc', 0.0), ('icnet_resnetd50b_cityscapes', '6060', 
'1e53e1d1724e61cc740cfbc818ca6e14015185ef', 'v0.0.457', 'cs', 0.0), ('fastscnn_cityscapes', '6505', 'ccc39c9bab2ca751f9ad524a430c4fe8c492ced9', 'v0.0.474', 'cs', 0.0), ('bisenet_resnet18_celebamaskhq', '0000', 'e8799341e74332932f5d162e3c1c780596caa219', 'v0.0.462', 'cs', 0.0), ('danet_resnetd50b_cityscapes', '6806', 'c79f5f22a16ea13d51fa062642e1d16133b4b700', 'v0.0.468', 'cs', 0.0), ('danet_resnetd101b_cityscapes', '6790', 'ebd5eef60777545f97f6aebc2be9510fe7e780c4', 'v0.0.468', 'cs', 0.0), ('alphapose_fastseresnet101b_coco', '7415', 'd1f0464a0f2c520d8690d49d09fe1426b0ab3eab', 'v0.0.454', 'cocohpe', 0.0), ('simplepose_resnet18_coco', '6631', '4d907c70a6f3ccaba321c05406ce038351e0c67f', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resnet50b_coco', '7102', '74506b66735333e3deab5908d309d3ec04c94861', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resnet101b_coco', '7244', '6f9e08d6afa08e83176e8e04f7566e255265e080', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resnet152b_coco', '7253', 'c018fb87bb8e5f5d8d6daa6a922869b2f36481cf', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resneta50b_coco', '7170', 'c9ddc1c90ddac88b1f64eb962e1bda87887668a5', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resneta101b_coco', '7297', '6db62b714be632359020c972bedb459e5210820f', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_resneta152b_coco', '7344', 'f65954b9df20bf9fa64a9791563729fa51983cf5', 'v0.0.455', 'cocohpe', 0.0), ('simplepose_mobile_resnet18_coco', '6625', '8f3e5cc4c6af306c23f0882887d7b36ee0b1079a', 'v0.0.456', 'cocohpe', 0.0), # noqa ('simplepose_mobile_resnet50b_coco', '7110', 'e8f61fdaf7aacbe58d006129943988ae95c9aef3', 'v0.0.456', 'cocohpe', 0.0), # noqa ('simplepose_mobile_mobilenet_w1_coco', '6410', '27c918b95148b87944eec36ac422bf18792513ae', 'v0.0.456', 'cocohpe', 0.0), # noqa ('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '4bcc3462fb2af46ed6daed78d15920a274e58051', 'v0.0.456', 'cocohpe', 0.0), # noqa ('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', 
'1cfee871467e99e7af23e5135bb9a4765f010a05', 'v0.0.456', 'cocohpe', 0.0), # noqa ('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', '8c8583fbe6d60355c232a10b5de8a455a38ba073', 'v0.0.456', 'cocohpe', 0.0), # noqa ('lwopenpose2d_mobilenet_cmupan_coco', '3999', '626b66cb1d36d0721b59d5acaa8d08d7690ea830', 'v0.0.458', 'cocohpe', 0.0), # noqa ('lwopenpose3d_mobilenet_cmupan_coco', '3999', 'df9b1c5f667deb93a87f69479ce92093e7c9f3b6', 'v0.0.458', 'cocohpe', 0.0), # noqa ('ibppose_coco', '6487', '79500f3d5dd990fd63544e3e3ca65f0382b06e44', 'v0.0.459', 'cocohpe', 0.0), ('jasperdr10x5_en', 'nana', '867a5dfc87310f4a503cea0182f2a8d56e0bc27e', 'v0.0.555', 'mcv', 0.0), ('jasperdr10x5_en_nr', 'nana', 'a0af1118cb092427f30871c6bcfe937cd2048045', 'v0.0.555', 'mcv', 0.0), ('quartznet5x5_en_ls', 'nana', 'a82dbfb02609a24986210b78eccdd18d133fdbaa', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_en', 'nana', '5d32ca3141d35ba836a78e47ae126b8ffd7eb3d3', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_en_nr', 'nana', 'f5d079c70c10639da81442e7f806ebab84e582d7', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_de', 'nana', 'b8c9c93938e9006dbddc1436bd12d1758da3bc9b', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_fr', 'nana', 'e5c5937d58e9f6b7c02d4082ad6bad84182939ee', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_it', 'nana', '7b95ad8f56b05a1dcfbcbeac0677937b4d98189f', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_es', 'nana', 'b371f87e60f3a4a707f0d08012f40a3f76cd93c4', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_ca', 'nana', '8a43e3a9a09ca1e7a213733bf0810d3384b71600', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_pl', 'nana', '4d3e67308d1410cdbf5191cad7104c4093f8d3f3', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_ru', 'nana', 'b891447d610753c7acc27d95c450da42074c25d7', 'v0.0.555', 'mcv', 0.0), ('quartznet15x5_ru34', 'nana', '398571cc25de51d57ea75207beb18dd5e8eab8b7', 'v0.0.555', 'mcv', 0.0), ]} imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob' def get_model_name_suffix_data(model_name): if model_name not in _model_sha1: raise 
ValueError("Pretrained model for {name} is not available.".format(name=model_name)) error, sha1_hash, repo_release_tag, ds, scale = _model_sha1[model_name] return error, sha1_hash, repo_release_tag def get_model_file(model_name, local_model_store_dir_path=os.path.join("~", ".tensorflow", "models")): """ Return location for the pretrained on local file system. This function will download from online model zoo when model cannot be found or has mismatch. The root directory will be created if it doesn't exist. Parameters: ---------- model_name : str Name of the model. local_model_store_dir_path : str, default $TENSORFLOW_HOME/models Location for keeping the model parameters. Returns: ------- file_path Path to the requested pretrained model file. """ error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name) short_sha1 = sha1_hash[:8] file_name = "{name}-{error}-{short_sha1}.tf2.h5".format( name=model_name, error=error, short_sha1=short_sha1) local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path) file_path = os.path.join(local_model_store_dir_path, file_name) if os.path.exists(file_path): if _check_sha1(file_path, sha1_hash): return file_path else: logging.warning("Mismatch in the content of model file detected. Downloading again.") else: logging.info("Model file not found. Downloading to {}.".format(file_path)) if not os.path.exists(local_model_store_dir_path): os.makedirs(local_model_store_dir_path) zip_file_path = file_path + ".zip" _download( url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format( repo_url=imgclsmob_repo_url, repo_release_tag=repo_release_tag, file_name=file_name), path=zip_file_path, overwrite=True) with zipfile.ZipFile(zip_file_path) as zf: zf.extractall(local_model_store_dir_path) os.remove(zip_file_path) if _check_sha1(file_path, sha1_hash): return file_path else: raise ValueError("Downloaded file has different hash. 
Please try again.") def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True): """ Download an given URL Parameters: ---------- url : str URL to download path : str, optional Destination path to store downloaded file. By default stores to the current directory with same name as in url. overwrite : bool, optional Whether to overwrite destination file if already exists. sha1_hash : str, optional Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified but doesn't match. retries : integer, default 5 The number of times to attempt the download in case of failure or non 200 return codes verify_ssl : bool, default True Verify SSL certificates. Returns: ------- str The file path of the downloaded file. """ import warnings try: import requests except ImportError: class requests_failed_to_import(object): pass requests = requests_failed_to_import if path is None: fname = url.split("/")[-1] # Empty filenames are invalid assert fname, "Can't construct file-name from this URL. Please set the `path` option manually." else: path = os.path.expanduser(path) if os.path.isdir(path): fname = os.path.join(path, url.split("/")[-1]) else: fname = path assert retries >= 0, "Number of retries should be at least 0" if not verify_ssl: warnings.warn( "Unverified HTTPS request is being made (verify_ssl=False). 
" "Adding certificate verification is strongly advised.") if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)): dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) if not os.path.exists(dirname): os.makedirs(dirname) while retries + 1 > 0: # Disable pyling too broad Exception # pylint: disable=W0703 try: print("Downloading {} from {}...".format(fname, url)) r = requests.get(url, stream=True, verify=verify_ssl) if r.status_code != 200: raise RuntimeError("Failed downloading url {}".format(url)) with open(fname, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) if sha1_hash and not _check_sha1(fname, sha1_hash): raise UserWarning("File {} is downloaded but the content hash does not match." " The repo may be outdated or download may be incomplete. " "If the `repo_url` is overridden, consider switching to " "the default repo.".format(fname)) break except Exception as e: retries -= 1 if retries <= 0: raise e else: print("download failed, retrying, {} attempt{} left" .format(retries, "s" if retries > 1 else "")) return fname def _check_sha1(filename, sha1_hash): """ Check whether the sha1 hash of the file content matches the expected hash. Parameters: ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns: ------- bool Whether the file content matches the expected hash. """ sha1 = hashlib.sha1() with open(filename, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash
66,178
85.849081
140
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/fastseresnet.py
""" Fast-SE-ResNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['FastSEResNet', 'fastseresnet101b'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, SEBlock, SimpleSequential, flatten from .resnet import ResBlock, ResBottleneck, ResInitBlock class FastSEResUnit(nn.Layer): """ Fast-SE-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. use_se : bool Whether to use SE-module. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, bottleneck, conv1_stride, use_se, data_format="channels_last", **kwargs): super(FastSEResUnit, self).__init__(**kwargs) self.use_se = use_se self.resize_identity = (in_channels != out_channels) or (strides != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=conv1_stride, data_format=data_format, name="body") else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") if self.use_se: self.se = SEBlock( channels=out_channels, reduction=1, use_conv=False, data_format=data_format, name="se") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, 
training=training) if self.use_se: x = self.se(x) x = x + identity x = self.activ(x) return x class FastSEResNet(tf.keras.Model): """ Fast-SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(FastSEResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 use_se = (j == 0) stage.add(FastSEResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=conv1_stride, use_se=use_se, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( 
units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_fastseresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Fast-SE-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported Fast-SE-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in 
channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = FastSEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def fastseresnet101b(**kwargs): """ Fast-SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fastseresnet(blocks=101, conv1_stride=False, model_name="fastseresnet101b", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ fastseresnet101b, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) # assert (model != fastseresnet101b or weight_count == 55697960) if __name__ == "__main__": _test()
10,194
31.887097
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/ibnbresnet.py
""" IBN(b)-ResNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. """ __all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, InstanceNorm, Conv2d, conv1x1_block, SimpleSequential, flatten, is_channels_first from .resnet import ResBottleneck class IBNbConvBlock(nn.Layer): """ IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. use_bias : bool, default False Whether the layer uses a bias vector. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, dilation=1, groups=1, use_bias=False, activate=True, data_format="channels_last", **kwargs): super(IBNbConvBlock, self).__init__(**kwargs) self.activate = activate self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, dilation=dilation, groups=groups, use_bias=use_bias, data_format=data_format, name="conv") self.inst_norm = InstanceNorm( scale=True, data_format=data_format, name="inst_norm") if self.activate: self.activ = nn.ReLU() def call(self, x, training=None): x = self.conv(x, training=training) x = self.inst_norm(x, training=training) if self.activate: x = self.activ(x) return x def ibnb_conv7x7_block(in_channels, out_channels, strides=1, padding=3, use_bias=False, activate=True, data_format="channels_last", **kwargs): """ 7x7 version of the IBN(b)-ResNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 3 Padding value for convolution layer. use_bias : bool, default False Whether the layer uses a bias vector. activate : bool, default True Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return IBNbConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=strides, padding=padding, use_bias=use_bias, activate=activate, data_format=data_format, **kwargs) class IBNbResUnit(nn.Layer): """ IBN(b)-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. use_inst_norm : bool Whether to use instance normalization. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, use_inst_norm, data_format="channels_last", **kwargs): super(IBNbResUnit, self).__init__(**kwargs) self.use_inst_norm = use_inst_norm self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, conv1_stride=False, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") if self.use_inst_norm: self.inst_norm = InstanceNorm( scale=True, data_format=data_format, name="inst_norm") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity if self.use_inst_norm: x = self.inst_norm(x, training=training) x = self.activ(x) return x class IBNbResInitBlock(nn.Layer): """ IBN(b)-ResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(IBNbResInitBlock, self).__init__(**kwargs) self.conv = ibnb_conv7x7_block( in_channels=in_channels, out_channels=out_channels, strides=2, data_format=data_format, name="conv") self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x, training=training) x = self.pool(x) return x class IBNbResNet(tf.keras.Model): """ IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(IBNbResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(IBNbResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1) stage.add(IBNbResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, use_inst_norm=use_inst_norm, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_ibnbresnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create IBN(b)-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported IBN(b)-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IBNbResNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def ibnb_resnet50(**kwargs): """ IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_ibnbresnet(blocks=50, model_name="ibnb_resnet50", **kwargs) def ibnb_resnet101(**kwargs): """ IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_ibnbresnet(blocks=101, model_name="ibnb_resnet101", **kwargs) def ibnb_resnet152(**kwargs): """ IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_ibnbresnet(blocks=152, model_name="ibnb_resnet152", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ ibnb_resnet50, ibnb_resnet101, ibnb_resnet152, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibnb_resnet50 or weight_count == 25558568) assert (model != ibnb_resnet101 or weight_count == 44550696) assert (model != ibnb_resnet152 or weight_count == 60194344) if __name__ == "__main__": _test()
13,824
31.377049
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/polynet.py
""" PolyNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. """ __all__ = ['PolyNet', 'polynet'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, Conv2d, ConvBlock, BatchNorm, SimpleSequential, ParametricSequential, Concurrent,\ ParametricConcurrent, conv1x1_block, conv3x3_block, flatten, is_channels_first class PolyConv(nn.Layer): """ PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules. The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception blocks inside a poly-N module. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. num_blocks : int Number of blocks (BatchNorm layers). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, num_blocks, data_format="channels_last", **kwargs): super(PolyConv, self).__init__(**kwargs) self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, use_bias=False, data_format=data_format, name="conv") self.bns = [] for i in range(num_blocks): self.bns.append(BatchNorm( data_format=data_format, name="bn{}".format(i + 1))) self.activ = nn.ReLU() def call(self, x, index, training=None): x = self.conv(x) x = self.bns[index](x) x = self.activ(x) return x def poly_conv1x1(in_channels, out_channels, num_blocks, data_format="channels_last", **kwargs): """ 1x1 version of the PolyNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. num_blocks : int Number of blocks (BatchNorm layers). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return PolyConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=1, padding=0, num_blocks=num_blocks, data_format=data_format, **kwargs) class MaxPoolBranch(nn.Layer): """ PolyNet specific max pooling branch block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(MaxPoolBranch, self).__init__(**kwargs) self.pool = MaxPool2d( pool_size=3, strides=2, padding=0, data_format=data_format, name="pool") def call(self, x, training=None): x = self.pool(x) return x class Conv1x1Branch(nn.Layer): """ PolyNet specific convolutional 1x1 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(Conv1x1Branch, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="conv") def call(self, x, training=None): x = self.conv(x, training=training) return x class Conv3x3Branch(nn.Layer): """ PolyNet specific convolutional 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(Conv3x3Branch, self).__init__(**kwargs) self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, padding=0, data_format=data_format, name="conv") def call(self, x, training=None): x = self.conv(x, training=training) return x class ConvSeqBranch(nn.Layer): """ PolyNet specific convolutional sequence branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, data_format="channels_last", **kwargs): super(ConvSeqBranch, self).__init__(**kwargs) assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = SimpleSequential(name="conv_list") for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add(ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, name="conv{}".format(i + 1))) in_channels = out_channels def call(self, x, training=None): x = self.conv_list(x, training=training) return x class PolyConvSeqBranch(nn.Layer): """ PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. num_blocks : int Number of blocks for PolyConv. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, num_blocks, data_format="channels_last", **kwargs): super(PolyConvSeqBranch, self).__init__(**kwargs) assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = ParametricSequential(name="conv_list") for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add(PolyConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, num_blocks=num_blocks, data_format=data_format, name="conv{}".format(i + 1))) in_channels = out_channels def call(self, x, index, training=None): x = self.conv_list(x, index=index, training=training) return x class TwoWayABlock(nn.Layer): """ PolyNet type Inception-A block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, data_format="channels_last", **kwargs): super(TwoWayABlock, self).__init__(**kwargs) in_channels = 384 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 48, 64), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), data_format=data_format, name="branch1")) self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(32, 32), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1), data_format=data_format, name="branch2")) self.branches.add(Conv1x1Branch( in_channels=in_channels, out_channels=32, data_format=data_format, name="branch3")) self.conv = conv1x1_block( in_channels=128, out_channels=in_channels, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): x = self.branches(x, training=training) x = self.conv(x, training=training) return x class TwoWayBBlock(nn.Layer): """ PolyNet type Inception-B block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(TwoWayBBlock, self).__init__(**kwargs) in_channels = 1152 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), data_format=data_format, name="branch1")) self.branches.add(Conv1x1Branch( in_channels=in_channels, out_channels=192, data_format=data_format, name="branch2")) self.conv = conv1x1_block( in_channels=384, out_channels=in_channels, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): x = self.branches(x, training=training) x = self.conv(x, training=training) return x class TwoWayCBlock(nn.Layer): """ PolyNet type Inception-C block. 
Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(TwoWayCBlock, self).__init__(**kwargs) in_channels = 2048 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)), data_format=data_format, name="branch1")) self.branches.add(Conv1x1Branch( in_channels=in_channels, out_channels=192, data_format=data_format, name="branch2")) self.conv = conv1x1_block( in_channels=448, out_channels=in_channels, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): x = self.branches(x, training=training) x = self.conv(x, training=training) return x class PolyPreBBlock(nn.Layer): """ PolyNet type PolyResidual-Pre-B block. Parameters: ---------- num_blocks : int Number of blocks (BatchNorm layers). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, num_blocks, data_format="channels_last", **kwargs): super(PolyPreBBlock, self).__init__(**kwargs) in_channels = 1152 self.branches = ParametricConcurrent( data_format=data_format, name="branches") self.branches.add(PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(128, 160, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), num_blocks=num_blocks, data_format=data_format, name="branch1")) self.branches.add(poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks, data_format=data_format, name="branch2")) def call(self, x, index, training=None): x = self.branches(x, index=index, training=training) return x class PolyPreCBlock(nn.Layer): """ PolyNet type PolyResidual-Pre-C block. 
Parameters: ---------- num_blocks : int Number of blocks (BatchNorm layers). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, num_blocks, data_format="channels_last", **kwargs): super(PolyPreCBlock, self).__init__(**kwargs) in_channels = 2048 self.branches = ParametricConcurrent( data_format=data_format, name="branches") self.branches.add(PolyConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)), num_blocks=num_blocks, data_format=data_format, name="branch1")) self.branches.add(poly_conv1x1( in_channels=in_channels, out_channels=192, num_blocks=num_blocks, data_format=data_format, name="branch2")) def call(self, x, index, training=None): x = self.branches(x, index=index, training=training) return x def poly_res_b_block(data_format="channels_last", **kwargs): """ PolyNet type PolyResidual-Res-B block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return conv1x1_block( in_channels=384, out_channels=1152, strides=1, activation=None, data_format=data_format, **kwargs) def poly_res_c_block(data_format="channels_last", **kwargs): """ PolyNet type PolyResidual-Res-C block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return conv1x1_block( in_channels=448, out_channels=2048, strides=1, activation=None, data_format=data_format, **kwargs) class MultiResidual(nn.Layer): """ Base class for constructing N-way modules (2-way, 3-way, and so on). Actually it is for 2-way modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. res_block : HybridBlock class Residual branch block. num_blocks : int Number of residual branches. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, scale, res_block, num_blocks, data_format="channels_last", **kwargs): super(MultiResidual, self).__init__(**kwargs) assert (num_blocks >= 1) self.scale = scale self.num_blocks = num_blocks self.res_blocks = [res_block( data_format=data_format, name="res_block{}".format(i + 1)) for i in range(num_blocks)] self.activ = nn.ReLU() def call(self, x, training=None): out = x for res_block in self.res_blocks: out = out + self.scale * res_block(x, training=training) out = self.activ(out) return out class PolyResidual(nn.Layer): """ The other base class for constructing N-way poly-modules. Actually it is for 3-way poly-modules. Parameters: ---------- scale : float, default 1.0 Scale value for each residual branch. res_block : HybridBlock class Residual branch block. num_blocks : int Number of residual branches. pre_block : HybridBlock class Preliminary block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, scale, res_block, num_blocks, pre_block, data_format="channels_last", **kwargs): super(PolyResidual, self).__init__(**kwargs) assert (num_blocks >= 1) self.scale = scale self.pre_block = pre_block( num_blocks=num_blocks, data_format=data_format, name="pre_block") self.res_blocks = [res_block( data_format=data_format, name="res_block{}".format(i + 1)) for i in range(num_blocks)] self.activ = nn.ReLU() def call(self, x, training=None): out = x for index, res_block in enumerate(self.res_blocks): x = self.pre_block(x, index, training=training) x = res_block(x, training=training) out = out + self.scale * x x = self.activ(x) out = self.activ(out) return out class PolyBaseUnit(nn.Layer): """ PolyNet unit base class. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. two_way_block : HybridBlock class Residual branch block for 2-way-stage. poly_scale : float, default 0.0 Scale value for 2-way stage. 
poly_res_block : HybridBlock class, default None Residual branch block for poly-stage. poly_pre_block : HybridBlock class, default None Preliminary branch block for poly-stage. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, two_way_scale, two_way_block, poly_scale=0.0, poly_res_block=None, poly_pre_block=None, data_format="channels_last", **kwargs): super(PolyBaseUnit, self).__init__(**kwargs) if poly_res_block is not None: assert (poly_scale != 0.0) assert (poly_pre_block is not None) self.poly = PolyResidual( scale=poly_scale, res_block=poly_res_block, num_blocks=3, pre_block=poly_pre_block, data_format=data_format, name="poly") else: assert (poly_scale == 0.0) assert (poly_pre_block is None) self.poly = None self.twoway = MultiResidual( scale=two_way_scale, res_block=two_way_block, num_blocks=2, data_format=data_format, name="twoway") def call(self, x, training=None): if self.poly is not None: x = self.poly(x, training=training) x = self.twoway(x, training=training) return x class PolyAUnit(PolyBaseUnit): """ PolyNet type A unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, two_way_scale, poly_scale=0.0, data_format="channels_last", **kwargs): super(PolyAUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayABlock, data_format=data_format, **kwargs) assert (poly_scale == 0.0) class PolyBUnit(PolyBaseUnit): """ PolyNet type B unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, two_way_scale, poly_scale, data_format="channels_last", **kwargs): super(PolyBUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayBBlock, poly_scale=poly_scale, poly_res_block=poly_res_b_block, poly_pre_block=PolyPreBBlock, data_format=data_format, **kwargs) class PolyCUnit(PolyBaseUnit): """ PolyNet type C unit. Parameters: ---------- two_way_scale : float Scale value for 2-way stage. poly_scale : float Scale value for 2-way stage. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, two_way_scale, poly_scale, data_format="channels_last", **kwargs): super(PolyCUnit, self).__init__( two_way_scale=two_way_scale, two_way_block=TwoWayCBlock, poly_scale=poly_scale, poly_res_block=poly_res_c_block, poly_pre_block=PolyPreCBlock, data_format=data_format, **kwargs) class ReductionAUnit(nn.Layer): """ PolyNet type Reduction-A unit. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(ReductionAUnit, self).__init__(**kwargs) in_channels = 384 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 384), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), data_format=data_format, name="branch1")) self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,), data_format=data_format, name="branch2")) self.branches.add(MaxPoolBranch( data_format=data_format, name="branch3")) def call(self, x, training=None): x = self.branches(x, training=training) return x class ReductionBUnit(nn.Layer): """ PolyNet type Reduction-B unit. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, data_format="channels_last", **kwargs): super(ReductionBUnit, self).__init__(**kwargs) in_channels = 1152 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 256), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), data_format=data_format, name="branch1")) self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), data_format=data_format, name="branch2")) self.branches.add(ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 384), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), data_format=data_format, name="branch3")) self.branches.add(MaxPoolBranch( data_format=data_format, name="branch4")) def call(self, x, training=None): x = self.branches(x, training=training) return x class PolyBlock3a(nn.Layer): """ PolyNet type Mixed-3a block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(PolyBlock3a, self).__init__(**kwargs) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(MaxPoolBranch( data_format=data_format, name="branch1")) self.branches.add(Conv3x3Branch( in_channels=64, out_channels=96, data_format=data_format, name="branch2")) def call(self, x, training=None): x = self.branches(x, training=training) return x class PolyBlock4a(nn.Layer): """ PolyNet type Mixed-4a block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, data_format="channels_last", **kwargs): super(PolyBlock4a, self).__init__(**kwargs) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(ConvSeqBranch( in_channels=160, out_channels_list=(64, 96), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 0), data_format=data_format, name="branch1")) self.branches.add(ConvSeqBranch( in_channels=160, out_channels_list=(64, 64, 64, 96), kernel_size_list=(1, (7, 1), (1, 7), 3), strides_list=(1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), 0), data_format=data_format, name="branch2")) def call(self, x, training=None): x = self.branches(x, training=training) return x class PolyBlock5a(nn.Layer): """ PolyNet type Mixed-5a block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(PolyBlock5a, self).__init__(**kwargs) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(MaxPoolBranch( data_format=data_format, name="branch1")) self.branches.add(Conv3x3Branch( in_channels=192, out_channels=192, data_format=data_format, name="branch2")) def call(self, x, training=None): x = self.branches(x, training=training) return x class PolyInitBlock(nn.Layer): """ PolyNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, data_format="channels_last", **kwargs): super(PolyInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, strides=2, padding=0, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=32, out_channels=32, padding=0, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=32, out_channels=64, data_format=data_format, name="conv3") self.block1 = PolyBlock3a( data_format=data_format, name="block1") self.block2 = PolyBlock4a( data_format=data_format, name="block2") self.block3 = PolyBlock5a( data_format=data_format, name="block3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) x = self.block1(x, training=training) x = self.block2(x, training=training) x = self.block3(x, training=training) return x class PolyNet(tf.keras.Model): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. Parameters: ---------- two_way_scales : list of list of floats Two way scale values for each normal unit. poly_scales : list of list of floats Three way scale values for each normal unit. dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (331, 331) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, two_way_scales, poly_scales, dropout_rate=0.2, in_channels=3, in_size=(331, 331), classes=1000, data_format="channels_last", **kwargs): super(PolyNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format normal_units = [PolyAUnit, PolyBUnit, PolyCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = SimpleSequential(name="features") self.features.add(PolyInitBlock( in_channels=in_channels, data_format=data_format, name="init_block")) for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, (two_way_scale, poly_scale) in enumerate(zip(two_way_scales_per_stage, poly_scales_per_stage)): if (j == 0) and (i != 0): unit = reduction_units[i - 1] stage.add(unit( data_format=data_format, name="unit{}".format(j + 1))) else: unit = normal_units[i] stage.add(unit( two_way_scale=two_way_scale, poly_scale=poly_scale, data_format=data_format, name="unit{}".format(j + 1))) self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=9, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") self.output1.add(nn.Dropout( rate=dropout_rate, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=2048, name="fc")) def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_polynet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create PolyNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" two_way_scales = [ [1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769], [0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923], [0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]] poly_scales = [ [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000], [0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615], [0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]] net = PolyNet( two_way_scales=two_way_scales, poly_scales=poly_scales, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def polynet(**kwargs): """ PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,' https://arxiv.org/abs/1611.05725. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_polynet(model_name="polynet", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ polynet, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 331, 331) if is_channels_first(data_format) else (batch, 331, 331, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != polynet or weight_count == 95366600) if __name__ == "__main__": _test()
37,828
30.576795
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/resnet_cifar.py
""" ResNet for CIFAR/SVHN, implemented in TensorFlow. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn', 'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn', 'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn', 'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn', 'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn', 'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn', 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn', 'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first from .resnet import ResUnit class CIFARResNet(tf.keras.Model): """ ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), classes=10, data_format="channels_last", **kwargs): super(CIFARResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(ResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bottleneck=bottleneck, conv1_stride=False, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=8, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_resnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def resnet20_cifar10(classes=10, **kwargs): """ ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar10", **kwargs) def resnet20_cifar100(classes=100, **kwargs): """ ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar100", **kwargs) def resnet20_svhn(classes=10, **kwargs): """ ResNet-20 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_svhn", **kwargs) def resnet56_cifar10(classes=10, **kwargs): """ ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar10", **kwargs) def resnet56_cifar100(classes=100, **kwargs): """ ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar100", **kwargs) def resnet56_svhn(classes=10, **kwargs): """ ResNet-56 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. 
Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_svhn", **kwargs) def resnet110_cifar10(classes=10, **kwargs): """ ResNet-110 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar10", **kwargs) def resnet110_cifar100(classes=100, **kwargs): """ ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar100", **kwargs) def resnet110_svhn(classes=10, **kwargs): """ ResNet-110 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_svhn", **kwargs) def resnet164bn_cifar10(classes=10, **kwargs): """ ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar10", **kwargs) def resnet164bn_cifar100(classes=100, **kwargs): """ ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar100", **kwargs) def resnet164bn_svhn(classes=10, **kwargs): """ ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_svhn", **kwargs) def resnet272bn_cifar10(classes=10, **kwargs): """ ResNet-272(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar10", **kwargs) def resnet272bn_cifar100(classes=100, **kwargs): """ ResNet-272(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_cifar100", **kwargs) def resnet272bn_svhn(classes=10, **kwargs): """ ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="resnet272bn_svhn", **kwargs) def resnet542bn_cifar10(classes=10, **kwargs): """ ResNet-542(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar10", **kwargs) def resnet542bn_cifar100(classes=100, **kwargs): """ ResNet-542(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_cifar100", **kwargs) def resnet542bn_svhn(classes=10, **kwargs): """ ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs) def resnet1001_cifar10(classes=10, **kwargs): """ ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar10", **kwargs) def resnet1001_cifar100(classes=100, **kwargs): """ ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_cifar100", **kwargs) def resnet1001_svhn(classes=10, **kwargs): """ ResNet-1001 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="resnet1001_svhn", **kwargs) def resnet1202_cifar10(classes=10, **kwargs): """ ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar10", **kwargs) def resnet1202_cifar100(classes=100, **kwargs): """ ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_cifar100", **kwargs) def resnet1202_svhn(classes=10, **kwargs): """ ResNet-1202 model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="resnet1202_svhn", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ (resnet20_cifar10, 10), (resnet20_cifar100, 100), (resnet20_svhn, 10), (resnet56_cifar10, 10), (resnet56_cifar100, 100), (resnet56_svhn, 10), (resnet110_cifar10, 10), (resnet110_cifar100, 100), (resnet110_svhn, 10), (resnet164bn_cifar10, 10), (resnet164bn_cifar100, 100), (resnet164bn_svhn, 10), (resnet272bn_cifar10, 10), (resnet272bn_cifar100, 100), (resnet272bn_svhn, 10), (resnet542bn_cifar10, 10), (resnet542bn_cifar100, 100), (resnet542bn_svhn, 10), (resnet1001_cifar10, 10), (resnet1001_cifar100, 100), (resnet1001_svhn, 10), (resnet1202_cifar10, 10), (resnet1202_cifar100, 100), (resnet1202_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet20_cifar10 or weight_count == 272474) assert (model != resnet20_cifar100 or weight_count == 278324) assert (model != resnet20_svhn or weight_count == 
272474) assert (model != resnet56_cifar10 or weight_count == 855770) assert (model != resnet56_cifar100 or weight_count == 861620) assert (model != resnet56_svhn or weight_count == 855770) assert (model != resnet110_cifar10 or weight_count == 1730714) assert (model != resnet110_cifar100 or weight_count == 1736564) assert (model != resnet110_svhn or weight_count == 1730714) assert (model != resnet164bn_cifar10 or weight_count == 1704154) assert (model != resnet164bn_cifar100 or weight_count == 1727284) assert (model != resnet164bn_svhn or weight_count == 1704154) assert (model != resnet272bn_cifar10 or weight_count == 2816986) assert (model != resnet272bn_cifar100 or weight_count == 2840116) assert (model != resnet272bn_svhn or weight_count == 2816986) assert (model != resnet542bn_cifar10 or weight_count == 5599066) assert (model != resnet542bn_cifar100 or weight_count == 5622196) assert (model != resnet542bn_svhn or weight_count == 5599066) assert (model != resnet1001_cifar10 or weight_count == 10328602) assert (model != resnet1001_cifar100 or weight_count == 10351732) assert (model != resnet1001_svhn or weight_count == 10328602) assert (model != resnet1202_cifar10 or weight_count == 19424026) assert (model != resnet1202_cifar100 or weight_count == 19429876) assert (model != resnet1202_svhn or weight_count == 19424026) if __name__ == "__main__": _test()
23,420
35.883465
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/nasnet.py
""" NASNet-A for ImageNet-1K, implemented in TensorFlow. Original paper: 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. """ __all__ = ['NASNet', 'nasnet_4a1056', 'nasnet_6a4032', 'nasnet_dual_path_sequential'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, AvgPool2d, BatchNorm, Conv2d, conv1x1, DualPathSequential, SimpleSequential, flatten,\ is_channels_first, get_channel_axis class NasDualPathScheme(object): """ NASNet specific scheme of dual path response for a block in a DualPathSequential module. Parameters: ---------- can_skip_input : bool Whether can skip input for some blocks. """ def __init__(self, can_skip_input): super(NasDualPathScheme, self).__init__() self.can_skip_input = can_skip_input def __call__(self, block, x, x_prev, training): """ Scheme function. Parameters: ---------- block : nn.HybridBlock A block. x : Tensor Current processed tensor. x_prev : Tensor Previous processed tensor. training : bool or None Whether to work in training mode or in inference mode. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ x_next = block(x, x_prev, training=training) if type(x_next) == tuple: x_next, x = x_next if self.can_skip_input and hasattr(block, 'skip_input') and block.skip_input: x = x_prev return x_next, x def nasnet_dual_path_scheme_ordinal(block, x, _, training): """ NASNet specific scheme of dual path response for an ordinal block with dual inputs/outputs in a DualPathSequential block. Parameters: ---------- block : nn.HybridBlock A block. x : Tensor Current processed tensor. training : bool or None Whether to work in training mode or in inference mode. Returns: ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. 
""" return block(x, training=training), x def nasnet_dual_path_sequential(return_two=True, first_ordinals=0, last_ordinals=0, can_skip_input=False, **kwargs): """ NASNet specific dual path sequential container. Parameters: ---------- return_two : bool, default True Whether to return two output after execution. first_ordinals : int, default 0 Number of the first blocks with single input/output. last_ordinals : int, default 0 Number of the final blocks with single input/output. dual_path_scheme : function Scheme of dual path response for a block. dual_path_scheme_ordinal : function Scheme of dual path response for an ordinal block. can_skip_input : bool, default False Whether can skip input for some blocks. """ return DualPathSequential( return_two=return_two, first_ordinals=first_ordinals, last_ordinals=last_ordinals, dual_path_scheme=NasDualPathScheme(can_skip_input=can_skip_input), dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal, **kwargs) def nasnet_batch_norm(channels, data_format="channels_last", **kwargs): """ NASNet specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ assert (channels is not None) return BatchNorm( momentum=0.1, epsilon=0.001, data_format=data_format, **kwargs) def nasnet_avgpool1x1_s2(data_format="channels_last", **kwargs): """ NASNet specific 1x1 Average pooling layer with stride 2. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return AvgPool2d( pool_size=1, strides=2, # count_include_pad=False, data_format=data_format, **kwargs) def nasnet_avgpool3x3_s1(data_format="channels_last", **kwargs): """ NASNet specific 3x3 Average pooling layer with stride 1. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return AvgPool2d( pool_size=3, strides=1, padding=1, # count_include_pad=False, data_format=data_format, **kwargs) def nasnet_avgpool3x3_s2(data_format="channels_last", **kwargs): """ NASNet specific 3x3 Average pooling layer with stride 2. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return AvgPool2d( pool_size=3, strides=2, padding=1, # count_include_pad=False, data_format=data_format, **kwargs) class NasMaxPoolBlock(nn.Layer): """ NASNet specific Max pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, extra_padding=False, data_format="channels_last", **kwargs): super(NasMaxPoolBlock, self).__init__(**kwargs) self.extra_padding = extra_padding self.data_format = data_format self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") if self.extra_padding: self.pad = nn.ZeroPadding2D( padding=((1, 0), (1, 0)), data_format=data_format) def call(self, x, training=None): if self.extra_padding: x = self.pad(x) x = self.pool(x) if self.extra_padding: if is_channels_first(self.data_format): x = x[:, :, 1:, 1:] else: x = x[:, 1:, 1:, :] return x class NasAvgPoolBlock(nn.Layer): """ NASNet specific 3x3 Average pooling layer with extra padding. Parameters: ---------- extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, extra_padding=False, data_format="channels_last", **kwargs): super(NasAvgPoolBlock, self).__init__(**kwargs) self.extra_padding = extra_padding self.data_format = data_format self.pool = AvgPool2d( pool_size=3, strides=2, padding=1, # count_include_pad=False, data_format=data_format, name="pool") if self.extra_padding: self.pad = nn.ZeroPadding2D( padding=((1, 0), (1, 0)), data_format=data_format) def call(self, x, training=None): if self.extra_padding: x = self.pad(x) x = self.pool(x) if self.extra_padding: if is_channels_first(self.data_format): x = x[:, :, 1:, 1:] else: x = x[:, 1:, 1:, :] return x class NasConv(nn.Layer): """ NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, groups, data_format="channels_last", **kwargs): super(NasConv, self).__init__(**kwargs) self.activ = nn.ReLU() self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, groups=groups, use_bias=False, data_format=data_format, name="conv") self.bn = nasnet_batch_norm( channels=out_channels, data_format=data_format, name="bn") def call(self, x, training=None): x = self.activ(x) x = self.conv(x) x = self.bn(x, training=training) return x def nas_conv1x1(in_channels, out_channels, data_format="channels_last", **kwargs): """ 1x1 version of the NASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return NasConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=1, padding=0, groups=1, data_format=data_format, **kwargs) class DwsConv(nn.Layer): """ Standard depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. use_bias : bool, default False Whether the layers use a bias vector. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, use_bias=False, data_format="channels_last", **kwargs): super(DwsConv, self).__init__(**kwargs) self.dw_conv = Conv2d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, strides=strides, padding=padding, groups=in_channels, use_bias=use_bias, data_format=data_format, name="dw_conv") self.pw_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, data_format=data_format, name="pw_conv") def call(self, x, training=None): x = self.dw_conv(x, training=training) x = self.pw_conv(x, training=training) return x class NasDwsConv(nn.Layer): """ NASNet specific depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, extra_padding=False, data_format="channels_last", **kwargs): super(NasDwsConv, self).__init__(**kwargs) self.extra_padding = extra_padding self.data_format = data_format self.activ = nn.ReLU() self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, use_bias=False, data_format=data_format, name="conv") self.bn = nasnet_batch_norm( channels=out_channels, data_format=data_format, name="bn") if self.extra_padding: self.pad = nn.ZeroPadding2D( padding=((1, 0), (1, 0)), data_format=data_format) def call(self, x, training=None): x = self.activ(x) if self.extra_padding: x = self.pad(x) x = self.conv(x, training=training) if self.extra_padding: if is_channels_first(self.data_format): x = x[:, :, 1:, 1:] else: x = x[:, 1:, 1:, :] x = self.bn(x, training=training) return x class DwsBranch(nn.Layer): """ NASNet specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, kernel_size, strides, padding, extra_padding=False, stem=False, data_format="channels_last", **kwargs): super(DwsBranch, self).__init__(**kwargs) assert (not stem) or (not extra_padding) mid_channels = out_channels if stem else in_channels self.conv1 = NasDwsConv( in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, strides=strides, padding=padding, extra_padding=extra_padding, data_format=data_format, name="conv1") self.conv2 = NasDwsConv( in_channels=mid_channels, out_channels=out_channels, kernel_size=kernel_size, strides=1, padding=padding, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x def dws_branch_k3_s1_p1(in_channels, out_channels, extra_padding=False, data_format="channels_last", **kwargs): """ 3x3/1/1 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=1, padding=1, extra_padding=extra_padding, data_format=data_format, **kwargs) def dws_branch_k5_s1_p2(in_channels, out_channels, extra_padding=False, data_format="channels_last", **kwargs): """ 5x5/1/2 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=5, strides=1, padding=2, extra_padding=extra_padding, data_format=data_format, **kwargs) def dws_branch_k5_s2_p2(in_channels, out_channels, extra_padding=False, stem=False, data_format="channels_last", **kwargs): """ 5x5/2/2 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=5, strides=2, padding=2, extra_padding=extra_padding, stem=stem, data_format=data_format, **kwargs) def dws_branch_k7_s2_p3(in_channels, out_channels, extra_padding=False, stem=False, data_format="channels_last", **kwargs): """ 7x7/2/3 version of the NASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=2, padding=3, extra_padding=extra_padding, stem=stem, data_format=data_format, **kwargs) class NasPathBranch(nn.Layer): """ NASNet specific `path` branch (auxiliary block). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. extra_padding : bool, default False Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, extra_padding=False, data_format="channels_last", **kwargs): super(NasPathBranch, self).__init__(**kwargs) self.extra_padding = extra_padding self.data_format = data_format self.avgpool = nasnet_avgpool1x1_s2( data_format=data_format, name="") self.conv = conv1x1( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="") if self.extra_padding: self.pad = nn.ZeroPadding2D( padding=((0, 1), (0, 1)), data_format=data_format) def call(self, x, training=None): if self.extra_padding: x = self.pad(x) if is_channels_first(self.data_format): x = x[:, :, 1:, 1:] else: x = x[:, 1:, 1:, :] x = self.avgpool(x) x = self.conv(x, training=training) return x class NasPathBlock(nn.Layer): """ NASNet specific `path` block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(NasPathBlock, self).__init__(**kwargs) self.data_format = data_format mid_channels = out_channels // 2 self.activ = nn.ReLU() self.path1 = NasPathBranch( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="path1") self.path2 = NasPathBranch( in_channels=in_channels, out_channels=mid_channels, extra_padding=True, data_format=data_format, name="path2") self.bn = nasnet_batch_norm( channels=out_channels, data_format=data_format, name="bn") def call(self, x, training=None): x = self.activ(x) x1 = self.path1(x, training=training) x2 = self.path2(x, training=training) x = tf.concat([x1, x2], axis=get_channel_axis(self.data_format)) x = self.bn(x, training=training) return x class Stem1Unit(nn.Layer): """ NASNet Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(Stem1Unit, self).__init__(**kwargs) self.data_format = data_format mid_channels = out_channels // 4 self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1") self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb0_left") self.comb0_right = dws_branch_k7_s2_p3( in_channels=in_channels, out_channels=mid_channels, stem=True, data_format=data_format, name="comb0_right") self.comb1_left = NasMaxPoolBlock( extra_padding=False, data_format=data_format, name="comb1_left") self.comb1_right = dws_branch_k7_s2_p3( in_channels=in_channels, out_channels=mid_channels, stem=True, data_format=data_format, name="comb1_right") self.comb2_left = nasnet_avgpool3x3_s2( data_format=data_format, name="comb2_left") self.comb2_right = dws_branch_k5_s2_p2( in_channels=in_channels, out_channels=mid_channels, stem=True, data_format=data_format, name="comb2_right") self.comb3_right = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_right") self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb4_left") self.comb4_right = NasMaxPoolBlock( extra_padding=False, data_format=data_format, name="comb4_right") def call(self, x, _=None, training=None): x_left = self.conv1x1(x, training=training) x_right = x x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training) x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training) x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training) x3 = x1 + self.comb3_right(x0, training=training) x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, 
training=training) x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format)) return x_out class Stem2Unit(nn.Layer): """ NASNet Stem2 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding, data_format="channels_last", **kwargs): super(Stem2Unit, self).__init__(**kwargs) self.data_format = data_format mid_channels = out_channels // 4 self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1") self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels, data_format=data_format, name="path") self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb0_left") self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb0_right") self.comb1_left = NasMaxPoolBlock( extra_padding=extra_padding, data_format=data_format, name="comb1_left") self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb1_right") self.comb2_left = NasAvgPoolBlock( extra_padding=extra_padding, data_format=data_format, name="comb2_left") self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb2_right") self.comb3_right = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_right") self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, 
out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb4_left") self.comb4_right = NasMaxPoolBlock( extra_padding=extra_padding, data_format=data_format, name="comb4_right") def call(self, x, x_prev, training=None): x_left = self.conv1x1(x, training=training) x_right = self.path(x_prev, training=training) x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training) x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training) x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training) x3 = x1 + self.comb3_right(x0, training=training) x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, training=training) x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format)) return x_out class FirstUnit(nn.Layer): """ NASNet First unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, data_format="channels_last", **kwargs): super(FirstUnit, self).__init__(**kwargs) self.data_format = data_format mid_channels = out_channels // 6 self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1") self.path = NasPathBlock( in_channels=prev_in_channels, out_channels=mid_channels, data_format=data_format, name="path") self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb0_left") self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb0_right") self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb1_left") self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb1_right") self.comb2_left = nasnet_avgpool3x3_s1( data_format=data_format, name="comb2_left") self.comb3_left = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_left") self.comb3_right = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_right") self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb4_left") def call(self, x, x_prev, training=None): x_left = self.conv1x1(x, training=training) x_right = self.path(x_prev, training=training) x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training) x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training) x2 = self.comb2_left(x_left, training=training) + x_right x3 = self.comb3_left(x_right, training=training) + self.comb3_right(x_right, training=training) x4 = self.comb4_left(x_left, training=training) + x_left x_out = tf.concat([x_right, x0, x1, x2, x3, x4], 
axis=get_channel_axis(self.data_format)) return x_out class NormalUnit(nn.Layer): """ NASNet Normal unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, prev_in_channels, out_channels, data_format="channels_last", **kwargs): super(NormalUnit, self).__init__(**kwargs) self.data_format = data_format mid_channels = out_channels // 6 self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1_prev") self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1") self.comb0_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb0_left") self.comb0_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb0_right") self.comb1_left = dws_branch_k5_s1_p2( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb1_left") self.comb1_right = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb1_right") self.comb2_left = nasnet_avgpool3x3_s1( data_format=data_format, name="comb2_left") self.comb3_left = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_left") self.comb3_right = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_right") self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, data_format=data_format, name="comb4_left") def call(self, x, x_prev, training=None): x_left = self.conv1x1(x, training=training) x_right = self.conv1x1_prev(x_prev, training=training) x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, 
training=training) x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training) x2 = self.comb2_left(x_left, training=training) + x_right x3 = self.comb3_left(x_right, training=training) + self.comb3_right(x_right, training=training) x4 = self.comb4_left(x_left, training=training) + x_left x_out = tf.concat([x_right, x0, x1, x2, x3, x4], axis=get_channel_axis(self.data_format)) return x_out class ReductionBaseUnit(nn.Layer): """ NASNet Reduction base unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool, default True Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding=True, data_format="channels_last", **kwargs): super(ReductionBaseUnit, self).__init__(**kwargs) self.data_format = data_format self.skip_input = True mid_channels = out_channels // 4 self.conv1x1_prev = nas_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1_prev") self.conv1x1 = nas_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1x1") self.comb0_left = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb0_left") self.comb0_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb0_right") self.comb1_left = NasMaxPoolBlock( extra_padding=extra_padding, data_format=data_format, name="comb1_left") self.comb1_right = dws_branch_k7_s2_p3( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb1_right") self.comb2_left = NasAvgPoolBlock( 
extra_padding=extra_padding, data_format=data_format, name="comb2_left") self.comb2_right = dws_branch_k5_s2_p2( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb2_right") self.comb3_right = nasnet_avgpool3x3_s1( data_format=data_format, name="comb3_right") self.comb4_left = dws_branch_k3_s1_p1( in_channels=mid_channels, out_channels=mid_channels, extra_padding=extra_padding, data_format=data_format, name="comb4_left") self.comb4_right = NasMaxPoolBlock( extra_padding=extra_padding, data_format=data_format, name="comb4_right") def call(self, x, x_prev, training=None): x_left = self.conv1x1(x, training=training) x_right = self.conv1x1_prev(x_prev, training=training) x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training) x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training) x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training) x3 = x1 + self.comb3_right(x0, training=training) x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, training=training) x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format)) return x_out class Reduction1Unit(ReductionBaseUnit): """ NASNet Reduction1 unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, prev_in_channels, out_channels, data_format="channels_last", **kwargs): super(Reduction1Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=True, data_format=data_format, **kwargs) class Reduction2Unit(ReductionBaseUnit): """ NASNet Reduction2 unit. 
Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. extra_padding : bool Whether to use extra padding. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, prev_in_channels, out_channels, extra_padding, data_format="channels_last", **kwargs): super(Reduction2Unit, self).__init__( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding, data_format=data_format, **kwargs) class NASNetInitBlock(nn.Layer): """ NASNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(NASNetInitBlock, self).__init__(**kwargs) self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=2, padding=0, use_bias=False, data_format=data_format, name="conv") self.bn = nasnet_batch_norm( channels=out_channels, data_format=data_format, name="bn") def call(self, x, training=None): x = self.conv(x) x = self.bn(x, training=training) return x class NASNet(tf.keras.Model): """ NASNet-A model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. stem_blocks_channels : list of 2 int Number of output channels for the Stem units. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. 
skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, stem_blocks_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(NASNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format reduction_units = [Reduction1Unit, Reduction2Unit] self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=1, last_ordinals=2, name="features") self.features.children.append(NASNetInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels out_channels = stem_blocks_channels[0] self.features.children.append(Stem1Unit( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="stem1_unit")) prev_in_channels = in_channels in_channels = out_channels out_channels = stem_blocks_channels[1] self.features.children.append(Stem2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding, data_format=data_format, name="stem2_unit")) prev_in_channels = in_channels in_channels = out_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential( can_skip_input=skip_reduction_layer_input, name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] elif ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)): unit = FirstUnit else: unit = 
NormalUnit if unit == Reduction2Unit: stage.children.append(Reduction2Unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, extra_padding=extra_padding, data_format=data_format, name="unit{}".format(j + 1))) else: stage.children.append(unit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, data_format=data_format, name="unit{}".format(j + 1))) prev_in_channels = in_channels in_channels = out_channels self.features.children.append(stage) self.features.children.append(nn.ReLU(name="activ")) self.features.children.append(nn.AveragePooling2D( pool_size=final_pool_size, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") self.output1.add(nn.Dropout( rate=0.5, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="fc")) def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_nasnet(repeat, penultimate_filters, init_block_channels, final_pool_size, extra_padding, skip_reduction_layer_input, in_size, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create NASNet-A model with specific parameters. Parameters: ---------- repeat : int NNumber of cell repeats. penultimate_filters : int Number of filters in the penultimate layer of the network. init_block_channels : int Number of output channels for the initial unit. final_pool_size : int Size of the pooling windows for final pool. extra_padding : bool Whether to use extra padding. skip_reduction_layer_input : bool Whether to skip the reduction layers when calculating the previous layer to connect to. in_size : tuple of two ints Spatial size of the expected input image. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ stem_blocks_channels = [1, 2] reduct_channels = [[], [8], [16]] norm_channels = [6, 12, 24] channels = [rci + [nci] * repeat for rci, nci in zip(reduct_channels, norm_channels)] base_channel_chunk = penultimate_filters // channels[-1][-1] stem_blocks_channels = [(ci * base_channel_chunk) for ci in stem_blocks_channels] channels = [[(cij * base_channel_chunk) for cij in ci] for ci in channels] net = NASNet( channels=channels, init_block_channels=init_block_channels, stem_blocks_channels=stem_blocks_channels, final_pool_size=final_pool_size, extra_padding=extra_padding, skip_reduction_layer_input=skip_reduction_layer_input, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def nasnet_4a1056(**kwargs): """ NASNet-A 4@1056 (NASNet-A-Mobile) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_nasnet( repeat=4, penultimate_filters=1056, init_block_channels=32, final_pool_size=7, extra_padding=True, skip_reduction_layer_input=False, in_size=(224, 224), model_name="nasnet_4a1056", **kwargs) def nasnet_6a4032(**kwargs): """ NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,' https://arxiv.org/abs/1707.07012. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_nasnet( repeat=6, penultimate_filters=4032, init_block_channels=96, final_pool_size=11, extra_padding=False, skip_reduction_layer_input=True, in_size=(331, 331), model_name="nasnet_6a4032", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ nasnet_4a1056, nasnet_6a4032, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 331, 331) if is_channels_first(data_format) else (batch, 331, 331, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != nasnet_4a1056 or weight_count == 5289978) assert (model != nasnet_6a4032 or weight_count == 88753150) if __name__ == "__main__": _test()
52,300
31.047181
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/resnext_cifar.py
""" ResNeXt for CIFAR/SVHN, implemented in TensorFlow. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['CIFARResNeXt', 'resnext20_1x64d_cifar10', 'resnext20_1x64d_cifar100', 'resnext20_1x64d_svhn', 'resnext20_2x32d_cifar10', 'resnext20_2x32d_cifar100', 'resnext20_2x32d_svhn', 'resnext20_2x64d_cifar10', 'resnext20_2x64d_cifar100', 'resnext20_2x64d_svhn', 'resnext20_4x16d_cifar10', 'resnext20_4x16d_cifar100', 'resnext20_4x16d_svhn', 'resnext20_4x32d_cifar10', 'resnext20_4x32d_cifar100', 'resnext20_4x32d_svhn', 'resnext20_8x8d_cifar10', 'resnext20_8x8d_cifar100', 'resnext20_8x8d_svhn', 'resnext20_8x16d_cifar10', 'resnext20_8x16d_cifar100', 'resnext20_8x16d_svhn', 'resnext20_16x4d_cifar10', 'resnext20_16x4d_cifar100', 'resnext20_16x4d_svhn', 'resnext20_16x8d_cifar10', 'resnext20_16x8d_cifar100', 'resnext20_16x8d_svhn', 'resnext20_32x2d_cifar10', 'resnext20_32x2d_cifar100', 'resnext20_32x2d_svhn', 'resnext20_32x4d_cifar10', 'resnext20_32x4d_cifar100', 'resnext20_32x4d_svhn', 'resnext20_64x1d_cifar10', 'resnext20_64x1d_cifar100', 'resnext20_64x1d_svhn', 'resnext20_64x2d_cifar10', 'resnext20_64x2d_cifar100', 'resnext20_64x2d_svhn', 'resnext29_32x4d_cifar10', 'resnext29_32x4d_cifar100', 'resnext29_32x4d_svhn', 'resnext29_16x64d_cifar10', 'resnext29_16x64d_cifar100', 'resnext29_16x64d_svhn', 'resnext56_1x64d_cifar10', 'resnext56_1x64d_cifar100', 'resnext56_1x64d_svhn', 'resnext56_2x32d_cifar10', 'resnext56_2x32d_cifar100', 'resnext56_2x32d_svhn', 'resnext56_4x16d_cifar10', 'resnext56_4x16d_cifar100', 'resnext56_4x16d_svhn', 'resnext56_8x8d_cifar10', 'resnext56_8x8d_cifar100', 'resnext56_8x8d_svhn', 'resnext56_16x4d_cifar10', 'resnext56_16x4d_cifar100', 'resnext56_16x4d_svhn', 'resnext56_32x2d_cifar10', 'resnext56_32x2d_cifar100', 'resnext56_32x2d_svhn', 'resnext56_64x1d_cifar10', 'resnext56_64x1d_cifar100', 'resnext56_64x1d_svhn', 'resnext272_1x64d_cifar10', 'resnext272_1x64d_cifar100', 
'resnext272_1x64d_svhn', 'resnext272_2x32d_cifar10', 'resnext272_2x32d_cifar100', 'resnext272_2x32d_svhn'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first from .resnext import ResNeXtUnit class CIFARResNeXt(tf.keras.Model): """ ResNeXt model for CIFAR from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(32, 32), classes=10, data_format="channels_last", **kwargs): super(CIFARResNeXt, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(ResNeXtUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, cardinality=cardinality, bottleneck_width=bottleneck_width, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) 
self.features.add(nn.AveragePooling2D( pool_size=8, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_resnext_cifar(classes, blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ ResNeXt model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ assert (blocks - 2) % 9 == 0 layers = [(blocks - 2) // 9] * 3 channels_per_layers = [256, 512, 1024] init_block_channels = 64 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def resnext20_1x64d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (1x64d) model for CIFAR-10 from 
'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=1, bottleneck_width=64, model_name="resnext20_1x64d_cifar10", **kwargs) def resnext20_1x64d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=1, bottleneck_width=64, model_name="resnext20_1x64d_cifar100", **kwargs) def resnext20_1x64d_svhn(classes=10, **kwargs): """ ResNeXt-20 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=1, bottleneck_width=64, model_name="resnext20_1x64d_svhn", **kwargs) def resnext20_2x32d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=32, model_name="resnext20_2x32d_cifar10", **kwargs) def resnext20_2x32d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=32, model_name="resnext20_2x32d_cifar100", **kwargs) def resnext20_2x32d_svhn(classes=10, **kwargs): """ ResNeXt-20 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=32, model_name="resnext20_2x32d_svhn", **kwargs) def resnext20_2x64d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (2x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=64, model_name="resnext20_2x64d_cifar10", **kwargs) def resnext20_2x64d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (2x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=64, model_name="resnext20_2x64d_cifar100", **kwargs) def resnext20_2x64d_svhn(classes=10, **kwargs): """ ResNeXt-20 (2x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=64, model_name="resnext20_2x64d_svhn", **kwargs) def resnext20_4x16d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (4x16d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=16, model_name="resnext20_4x16d_cifar10", **kwargs) def resnext20_4x16d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (4x16d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=16, model_name="resnext20_4x16d_cifar100", **kwargs) def resnext20_4x16d_svhn(classes=10, **kwargs): """ ResNeXt-20 (4x16d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=16, model_name="resnext20_4x16d_svhn", **kwargs) def resnext20_4x32d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (4x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=32, model_name="resnext20_4x32d_cifar10", **kwargs) def resnext20_4x32d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (4x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=32, model_name="resnext20_4x32d_cifar100", **kwargs) def resnext20_4x32d_svhn(classes=10, **kwargs): """ ResNeXt-20 (4x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=4, bottleneck_width=32, model_name="resnext20_4x32d_svhn", **kwargs) def resnext20_8x8d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (8x8d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=8, model_name="resnext20_8x8d_cifar10", **kwargs) def resnext20_8x8d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (8x8d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=8, model_name="resnext20_8x8d_cifar100", **kwargs) def resnext20_8x8d_svhn(classes=10, **kwargs): """ ResNeXt-20 (8x8d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=8, model_name="resnext20_8x8d_svhn", **kwargs) def resnext20_8x16d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (8x16d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=16, model_name="resnext20_8x16d_cifar10", **kwargs) def resnext20_8x16d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (8x16d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=16, model_name="resnext20_8x16d_cifar100", **kwargs) def resnext20_8x16d_svhn(classes=10, **kwargs): """ ResNeXt-20 (8x16d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=8, bottleneck_width=16, model_name="resnext20_8x16d_svhn", **kwargs) def resnext20_16x4d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar10", **kwargs) def resnext20_16x4d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (16x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_cifar100", **kwargs) def resnext20_16x4d_svhn(classes=10, **kwargs): """ ResNeXt-20 (16x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=4, model_name="resnext20_16x4d_svhn", **kwargs) def resnext20_16x8d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (16x8d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=8, model_name="resnext20_16x8d_cifar10", **kwargs) def resnext20_16x8d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (16x8d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=8, model_name="resnext20_16x8d_cifar100", **kwargs) def resnext20_16x8d_svhn(classes=10, **kwargs): """ ResNeXt-20 (16x8d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=16, bottleneck_width=8, model_name="resnext20_16x8d_svhn", **kwargs) def resnext20_32x2d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar10", **kwargs) def resnext20_32x2d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (32x2d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_cifar100", **kwargs) def resnext20_32x2d_svhn(classes=10, **kwargs): """ ResNeXt-20 (32x2d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=2, model_name="resnext20_32x2d_svhn", **kwargs) def resnext20_32x4d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar10", **kwargs) def resnext20_32x4d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_cifar100", **kwargs) def resnext20_32x4d_svhn(classes=10, **kwargs): """ ResNeXt-20 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=32, bottleneck_width=4, model_name="resnext20_32x4d_svhn", **kwargs) def resnext20_64x1d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (64x1d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=1, model_name="resnext20_64x1d_cifar10", **kwargs) def resnext20_64x1d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (64x1d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=1, model_name="resnext20_64x1d_cifar100", **kwargs) def resnext20_64x1d_svhn(classes=10, **kwargs): """ ResNeXt-20 (64x1d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=1, model_name="resnext20_64x1d_svhn", **kwargs) def resnext20_64x2d_cifar10(classes=10, **kwargs): """ ResNeXt-20 (64x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=2, model_name="resnext20_64x2d_cifar10", **kwargs) def resnext20_64x2d_cifar100(classes=100, **kwargs): """ ResNeXt-20 (64x2d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=2, model_name="resnext20_64x2d_cifar100", **kwargs) def resnext20_64x2d_svhn(classes=10, **kwargs): """ ResNeXt-20 (64x1d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=2, model_name="resnext20_64x2d_svhn", **kwargs) def resnext29_32x4d_cifar10(classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar10", **kwargs) def resnext29_32x4d_cifar100(classes=100, **kwargs): """ ResNeXt-29 (32x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_cifar100", **kwargs) def resnext29_32x4d_svhn(classes=10, **kwargs): """ ResNeXt-29 (32x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=32, bottleneck_width=4, model_name="resnext29_32x4d_svhn", **kwargs) def resnext29_16x64d_cifar10(classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar10", **kwargs) def resnext29_16x64d_cifar100(classes=100, **kwargs): """ ResNeXt-29 (16x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_cifar100", **kwargs) def resnext29_16x64d_svhn(classes=10, **kwargs): """ ResNeXt-29 (16x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=29, cardinality=16, bottleneck_width=64, model_name="resnext29_16x64d_svhn", **kwargs) def resnext56_1x64d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (1x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=1, bottleneck_width=64, model_name="resnext56_1x64d_cifar10", **kwargs) def resnext56_1x64d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=1, bottleneck_width=64, model_name="resnext56_1x64d_cifar100", **kwargs) def resnext56_1x64d_svhn(classes=10, **kwargs): """ ResNeXt-56 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=1, bottleneck_width=64, model_name="resnext56_1x64d_svhn", **kwargs) def resnext56_2x32d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=2, bottleneck_width=32, model_name="resnext56_2x32d_cifar10", **kwargs) def resnext56_2x32d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=2, bottleneck_width=32, model_name="resnext56_2x32d_cifar100", **kwargs) def resnext56_2x32d_svhn(classes=10, **kwargs): """ ResNeXt-56 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=2, bottleneck_width=32, model_name="resnext56_2x32d_svhn", **kwargs) def resnext56_4x16d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (4x16d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=4, bottleneck_width=16, model_name="resnext56_4x16d_cifar10", **kwargs) def resnext56_4x16d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (4x16d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=4, bottleneck_width=16, model_name="resnext56_4x16d_cifar100", **kwargs) def resnext56_4x16d_svhn(classes=10, **kwargs): """ ResNeXt-56 (4x16d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=4, bottleneck_width=16, model_name="resnext56_4x16d_svhn", **kwargs) def resnext56_8x8d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (8x8d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=8, bottleneck_width=8, model_name="resnext56_8x8d_cifar10", **kwargs) def resnext56_8x8d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (8x8d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=8, bottleneck_width=8, model_name="resnext56_8x8d_cifar100", **kwargs) def resnext56_8x8d_svhn(classes=10, **kwargs): """ ResNeXt-56 (8x8d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=8, bottleneck_width=8, model_name="resnext56_8x8d_svhn", **kwargs) def resnext56_16x4d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (16x4d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=16, bottleneck_width=4, model_name="resnext56_16x4d_cifar10", **kwargs) def resnext56_16x4d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (16x4d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=16, bottleneck_width=4, model_name="resnext56_16x4d_cifar100", **kwargs) def resnext56_16x4d_svhn(classes=10, **kwargs): """ ResNeXt-56 (16x4d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=16, bottleneck_width=4, model_name="resnext56_16x4d_svhn", **kwargs) def resnext56_32x2d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (32x2d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=32, bottleneck_width=2, model_name="resnext56_32x2d_cifar10", **kwargs) def resnext56_32x2d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (32x2d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=32, bottleneck_width=2, model_name="resnext56_32x2d_cifar100", **kwargs) def resnext56_32x2d_svhn(classes=10, **kwargs): """ ResNeXt-56 (32x2d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=32, bottleneck_width=2, model_name="resnext56_32x2d_svhn", **kwargs) def resnext56_64x1d_cifar10(classes=10, **kwargs): """ ResNeXt-56 (64x1d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=56, cardinality=64, bottleneck_width=1, model_name="resnext56_64x1d_cifar10", **kwargs) def resnext56_64x1d_cifar100(classes=100, **kwargs): """ ResNeXt-56 (64x1d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=64, bottleneck_width=1, model_name="resnext56_64x1d_cifar100", **kwargs) def resnext56_64x1d_svhn(classes=10, **kwargs): """ ResNeXt-56 (64x1d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=56, cardinality=64, bottleneck_width=1, model_name="resnext56_64x1d_svhn", **kwargs) def resnext272_1x64d_cifar10(classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar10", **kwargs) def resnext272_1x64d_cifar100(classes=100, **kwargs): """ ResNeXt-272 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_cifar100", **kwargs) def resnext272_1x64d_svhn(classes=10, **kwargs): """ ResNeXt-272 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=1, bottleneck_width=64, model_name="resnext272_1x64d_svhn", **kwargs) def resnext272_2x32d_cifar10(classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar10", **kwargs) def resnext272_2x32d_cifar100(classes=100, **kwargs): """ ResNeXt-272 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_cifar100", **kwargs) def resnext272_2x32d_svhn(classes=10, **kwargs): """ ResNeXt-272 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_resnext_cifar(classes=classes, blocks=272, cardinality=2, bottleneck_width=32, model_name="resnext272_2x32d_svhn", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ (resnext20_1x64d_cifar10, 10), (resnext20_1x64d_cifar100, 100), (resnext20_1x64d_svhn, 10), (resnext20_2x32d_cifar10, 10), (resnext20_2x32d_cifar100, 100), (resnext20_2x32d_svhn, 10), (resnext20_2x64d_cifar10, 10), (resnext20_2x64d_cifar100, 100), (resnext20_2x64d_svhn, 10), (resnext20_4x16d_cifar10, 10), (resnext20_4x16d_cifar100, 100), (resnext20_4x16d_svhn, 10), (resnext20_4x32d_cifar10, 10), (resnext20_4x32d_cifar100, 100), (resnext20_4x32d_svhn, 10), (resnext20_8x8d_cifar10, 10), (resnext20_8x8d_cifar100, 100), (resnext20_8x8d_svhn, 10), (resnext20_8x16d_cifar10, 10), (resnext20_8x16d_cifar100, 100), (resnext20_8x16d_svhn, 10), (resnext20_16x4d_cifar10, 10), (resnext20_16x4d_cifar100, 100), (resnext20_16x4d_svhn, 10), (resnext20_16x8d_cifar10, 10), (resnext20_16x8d_cifar100, 100), (resnext20_16x8d_svhn, 10), (resnext20_32x2d_cifar10, 10), (resnext20_32x2d_cifar100, 100), (resnext20_32x2d_svhn, 10), (resnext20_32x4d_cifar10, 10), (resnext20_32x4d_cifar100, 100), (resnext20_32x4d_svhn, 10), (resnext20_64x1d_cifar10, 10), (resnext20_64x1d_cifar100, 100), (resnext20_64x1d_svhn, 10), (resnext20_64x2d_cifar10, 10), (resnext20_64x2d_cifar100, 100), (resnext20_64x2d_svhn, 10), (resnext29_32x4d_cifar10, 10), (resnext29_32x4d_cifar100, 100), (resnext29_32x4d_svhn, 10), (resnext29_16x64d_cifar10, 10), (resnext29_16x64d_cifar100, 100), (resnext29_16x64d_svhn, 10), (resnext56_1x64d_cifar10, 10), (resnext56_1x64d_cifar100, 100), (resnext56_1x64d_svhn, 10), (resnext56_2x32d_cifar10, 10), (resnext56_2x32d_cifar100, 100), (resnext56_2x32d_svhn, 10), (resnext56_4x16d_cifar10, 10), (resnext56_4x16d_cifar100, 100), (resnext56_4x16d_svhn, 10), (resnext56_8x8d_cifar10, 10), 
(resnext56_8x8d_cifar100, 100), (resnext56_8x8d_svhn, 10), (resnext56_16x4d_cifar10, 10), (resnext56_16x4d_cifar100, 100), (resnext56_16x4d_svhn, 10), (resnext56_32x2d_cifar10, 10), (resnext56_32x2d_cifar100, 100), (resnext56_32x2d_svhn, 10), (resnext56_64x1d_cifar10, 10), (resnext56_64x1d_cifar100, 100), (resnext56_64x1d_svhn, 10), (resnext272_1x64d_cifar10, 10), (resnext272_1x64d_cifar100, 100), (resnext272_1x64d_svhn, 10), (resnext272_2x32d_cifar10, 10), (resnext272_2x32d_cifar100, 100), (resnext272_2x32d_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnext20_1x64d_cifar10 or weight_count == 3446602) assert (model != resnext20_1x64d_cifar100 or weight_count == 3538852) assert (model != resnext20_1x64d_svhn or weight_count == 3446602) assert (model != resnext20_2x32d_cifar10 or weight_count == 2672458) assert (model != resnext20_2x32d_cifar100 or weight_count == 2764708) assert (model != resnext20_2x32d_svhn or weight_count == 2672458) assert (model != resnext20_2x64d_cifar10 or weight_count == 6198602) assert (model != resnext20_2x64d_cifar100 or weight_count == 6290852) assert (model != resnext20_2x64d_svhn or weight_count == 6198602) assert (model != resnext20_4x16d_cifar10 or weight_count == 2285386) assert (model != resnext20_4x16d_cifar100 or weight_count == 2377636) assert (model != resnext20_4x16d_svhn or weight_count == 2285386) assert (model != resnext20_4x32d_cifar10 or weight_count == 4650314) assert (model != resnext20_4x32d_cifar100 or weight_count == 4742564) assert (model != resnext20_4x32d_svhn or weight_count == 4650314) assert (model != resnext20_8x8d_cifar10 or 
weight_count == 2091850) assert (model != resnext20_8x8d_cifar100 or weight_count == 2184100) assert (model != resnext20_8x8d_svhn or weight_count == 2091850) assert (model != resnext20_8x16d_cifar10 or weight_count == 3876170) assert (model != resnext20_8x16d_cifar100 or weight_count == 3968420) assert (model != resnext20_8x16d_svhn or weight_count == 3876170) assert (model != resnext20_16x4d_cifar10 or weight_count == 1995082) assert (model != resnext20_16x4d_cifar100 or weight_count == 2087332) assert (model != resnext20_16x4d_svhn or weight_count == 1995082) assert (model != resnext20_16x8d_cifar10 or weight_count == 3489098) assert (model != resnext20_16x8d_cifar100 or weight_count == 3581348) assert (model != resnext20_16x8d_svhn or weight_count == 3489098) assert (model != resnext20_32x2d_cifar10 or weight_count == 1946698) assert (model != resnext20_32x2d_cifar100 or weight_count == 2038948) assert (model != resnext20_32x2d_svhn or weight_count == 1946698) assert (model != resnext20_32x4d_cifar10 or weight_count == 3295562) assert (model != resnext20_32x4d_cifar100 or weight_count == 3387812) assert (model != resnext20_32x4d_svhn or weight_count == 3295562) assert (model != resnext20_64x1d_cifar10 or weight_count == 1922506) assert (model != resnext20_64x1d_cifar100 or weight_count == 2014756) assert (model != resnext20_64x1d_svhn or weight_count == 1922506) assert (model != resnext20_64x2d_cifar10 or weight_count == 3198794) assert (model != resnext20_64x2d_cifar100 or weight_count == 3291044) assert (model != resnext20_64x2d_svhn or weight_count == 3198794) assert (model != resnext29_32x4d_cifar10 or weight_count == 4775754) assert (model != resnext29_32x4d_cifar100 or weight_count == 4868004) assert (model != resnext29_32x4d_svhn or weight_count == 4775754) assert (model != resnext29_16x64d_cifar10 or weight_count == 68155210) assert (model != resnext29_16x64d_cifar100 or weight_count == 68247460) assert (model != resnext29_16x64d_svhn or weight_count == 
68155210) assert (model != resnext56_1x64d_cifar10 or weight_count == 9317194) assert (model != resnext56_1x64d_cifar100 or weight_count == 9409444) assert (model != resnext56_1x64d_svhn or weight_count == 9317194) assert (model != resnext56_2x32d_cifar10 or weight_count == 6994762) assert (model != resnext56_2x32d_cifar100 or weight_count == 7087012) assert (model != resnext56_2x32d_svhn or weight_count == 6994762) assert (model != resnext56_4x16d_cifar10 or weight_count == 5833546) assert (model != resnext56_4x16d_cifar100 or weight_count == 5925796) assert (model != resnext56_4x16d_svhn or weight_count == 5833546) assert (model != resnext56_8x8d_cifar10 or weight_count == 5252938) assert (model != resnext56_8x8d_cifar100 or weight_count == 5345188) assert (model != resnext56_8x8d_svhn or weight_count == 5252938) assert (model != resnext56_16x4d_cifar10 or weight_count == 4962634) assert (model != resnext56_16x4d_cifar100 or weight_count == 5054884) assert (model != resnext56_16x4d_svhn or weight_count == 4962634) assert (model != resnext56_32x2d_cifar10 or weight_count == 4817482) assert (model != resnext56_32x2d_cifar100 or weight_count == 4909732) assert (model != resnext56_32x2d_svhn or weight_count == 4817482) assert (model != resnext56_64x1d_cifar10 or weight_count == 4744906) assert (model != resnext56_64x1d_cifar100 or weight_count == 4837156) assert (model != resnext56_64x1d_svhn or weight_count == 4744906) assert (model != resnext272_1x64d_cifar10 or weight_count == 44540746) assert (model != resnext272_1x64d_cifar100 or weight_count == 44632996) assert (model != resnext272_1x64d_svhn or weight_count == 44540746) assert (model != resnext272_2x32d_cifar10 or weight_count == 32928586) assert (model != resnext272_2x32d_cifar100 or weight_count == 33020836) assert (model != resnext272_2x32d_svhn or weight_count == 32928586) if __name__ == "__main__": _test()
65,482
38.904327
116
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/densenet_cifar.py
""" DenseNet for CIFAR/SVHN, implemented in TensorFlow. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_svhn', 'densenet40_k12_bc_cifar10', 'densenet40_k12_bc_cifar100', 'densenet40_k12_bc_svhn', 'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100', 'densenet40_k24_bc_svhn', 'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet40_k36_bc_svhn', 'densenet100_k12_cifar10', 'densenet100_k12_cifar100', 'densenet100_k12_svhn', 'densenet100_k24_cifar10', 'densenet100_k24_cifar100', 'densenet100_k24_svhn', 'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100', 'densenet100_k12_bc_svhn', 'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet190_k40_bc_svhn', 'densenet250_k24_bc_cifar10', 'densenet250_k24_bc_cifar100', 'densenet250_k24_bc_svhn'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv3x3, pre_conv3x3_block, SimpleSequential, flatten, get_channel_axis, is_channels_first from .preresnet import PreResActivation from .densenet import DenseUnit, TransitionBlock class DenseSimpleUnit(nn.Layer): """ DenseNet simple unit for CIFAR. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, dropout_rate, data_format="channels_last", **kwargs): super(DenseSimpleUnit, self).__init__(**kwargs) self.data_format = data_format self.use_dropout = (dropout_rate != 0.0) inc_channels = out_channels - in_channels self.conv = pre_conv3x3_block( in_channels=in_channels, out_channels=inc_channels, data_format=data_format, name="conv") if self.use_dropout: self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") def call(self, x, training=None): identity = x x = self.conv(x, training=training) if self.use_dropout: x = self.dropout(x, training=training) x = tf.concat([identity, x], axis=get_channel_axis(self.data_format)) return x class CIFARDenseNet(tf.keras.Model): """ DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. classes : int, default 10 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, bottleneck, dropout_rate=0.0, in_channels=3, in_size=(32, 32), classes=10, data_format="channels_last", **kwargs): super(CIFARDenseNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format unit_class = DenseUnit if bottleneck else DenseSimpleUnit self.features = SimpleSequential(name="features") self.features.add(conv3x3( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) if i != 0: stage.add(TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2), data_format=data_format, name="trans{}".format(i + 1))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): stage.add(unit_class( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(PreResActivation( in_channels=in_channels, data_format=data_format, name="post_activ")) self.features.add(nn.AveragePooling2D( pool_size=8, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_densenet_cifar(classes, blocks, growth_rate, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DenseNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. growth_rate : int Growth rate. bottleneck : bool Whether to use a bottleneck or simple block in units. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 else: assert ((blocks - 4) % 3 == 0) layers = [(blocks - 4) // 3] * 3 init_block_channels = 2 * growth_rate from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = CIFARDenseNet( channels=channels, init_block_channels=init_block_channels, classes=classes, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def densenet40_k12_cifar10(classes=10, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar10", **kwargs) def densenet40_k12_cifar100(classes=100, **kwargs): """ DenseNet-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_cifar100", **kwargs) def densenet40_k12_svhn(classes=10, **kwargs): """ DenseNet-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False, model_name="densenet40_k12_svhn", **kwargs) def densenet40_k12_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar10", **kwargs) def densenet40_k12_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_cifar100", **kwargs) def densenet40_k12_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True, model_name="densenet40_k12_bc_svhn", **kwargs) def densenet40_k24_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar10", **kwargs) def densenet40_k24_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_cifar100", **kwargs) def densenet40_k24_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True, model_name="densenet40_k24_bc_svhn", **kwargs) def densenet40_k36_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar10", **kwargs) def densenet40_k36_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-40 (k=36) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_cifar100", **kwargs) def densenet40_k36_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-40 (k=36) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True, model_name="densenet40_k36_bc_svhn", **kwargs) def densenet100_k12_cifar10(classes=10, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar10", **kwargs) def densenet100_k12_cifar100(classes=100, **kwargs): """ DenseNet-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_cifar100", **kwargs) def densenet100_k12_svhn(classes=10, **kwargs): """ DenseNet-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False, model_name="densenet100_k12_svhn", **kwargs) def densenet100_k24_cifar10(classes=10, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar10", **kwargs) def densenet100_k24_cifar100(classes=100, **kwargs): """ DenseNet-100 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_cifar100", **kwargs) def densenet100_k24_svhn(classes=10, **kwargs): """ DenseNet-100 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False, model_name="densenet100_k24_svhn", **kwargs) def densenet100_k12_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar10", **kwargs) def densenet100_k12_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_cifar100", **kwargs) def densenet100_k12_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True, model_name="densenet100_k12_bc_svhn", **kwargs) def densenet190_k40_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar10", **kwargs) def densenet190_k40_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-190 (k=40) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_cifar100", **kwargs) def densenet190_k40_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-190 (k=40) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True, model_name="densenet190_k40_bc_svhn", **kwargs) def densenet250_k24_bc_cifar10(classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar10", **kwargs) def densenet250_k24_bc_cifar100(classes=100, **kwargs): """ DenseNet-BC-250 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_cifar100", **kwargs) def densenet250_k24_bc_svhn(classes=10, **kwargs): """ DenseNet-BC-250 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True, model_name="densenet250_k24_bc_svhn", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ (densenet40_k12_cifar10, 10), (densenet40_k12_cifar100, 100), (densenet40_k12_svhn, 10), (densenet40_k12_bc_cifar10, 10), (densenet40_k12_bc_cifar100, 100), (densenet40_k12_bc_svhn, 10), (densenet40_k24_bc_cifar10, 10), (densenet40_k24_bc_cifar100, 100), (densenet40_k24_bc_svhn, 10), (densenet40_k36_bc_cifar10, 10), (densenet40_k36_bc_cifar100, 100), (densenet40_k36_bc_svhn, 10), (densenet100_k12_cifar10, 10), (densenet100_k12_cifar100, 100), (densenet100_k12_svhn, 10), (densenet100_k24_cifar10, 10), (densenet100_k24_cifar100, 100), (densenet100_k24_svhn, 10), (densenet100_k12_bc_cifar10, 10), (densenet100_k12_bc_cifar100, 100), (densenet100_k12_bc_svhn, 10), (densenet190_k40_bc_cifar10, 10), (densenet190_k40_bc_cifar100, 100), (densenet190_k40_bc_svhn, 10), (densenet250_k24_bc_cifar10, 10), (densenet250_k24_bc_cifar100, 100), (densenet250_k24_bc_svhn, 10), ] for model, classes in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, classes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet40_k12_cifar10 or weight_count == 599050) assert (model != densenet40_k12_cifar100 or weight_count == 622360) assert (model != densenet40_k12_svhn or weight_count == 599050) assert (model != densenet40_k12_bc_cifar10 or weight_count == 176122) assert (model != densenet40_k12_bc_cifar100 or weight_count == 188092) assert (model != densenet40_k12_bc_svhn or weight_count == 176122) assert (model 
!= densenet40_k24_bc_cifar10 or weight_count == 690346) assert (model != densenet40_k24_bc_cifar100 or weight_count == 714196) assert (model != densenet40_k24_bc_svhn or weight_count == 690346) assert (model != densenet40_k36_bc_cifar10 or weight_count == 1542682) assert (model != densenet40_k36_bc_cifar100 or weight_count == 1578412) assert (model != densenet40_k36_bc_svhn or weight_count == 1542682) assert (model != densenet100_k12_cifar10 or weight_count == 4068490) assert (model != densenet100_k12_cifar100 or weight_count == 4129600) assert (model != densenet100_k12_svhn or weight_count == 4068490) assert (model != densenet100_k24_cifar10 or weight_count == 16114138) assert (model != densenet100_k24_cifar100 or weight_count == 16236268) assert (model != densenet100_k24_svhn or weight_count == 16114138) assert (model != densenet100_k12_bc_cifar10 or weight_count == 769162) assert (model != densenet100_k12_bc_cifar100 or weight_count == 800032) assert (model != densenet100_k12_bc_svhn or weight_count == 769162) assert (model != densenet190_k40_bc_cifar10 or weight_count == 25624430) assert (model != densenet190_k40_bc_cifar100 or weight_count == 25821620) assert (model != densenet190_k40_bc_svhn or weight_count == 25624430) assert (model != densenet250_k24_bc_cifar10 or weight_count == 15324406) assert (model != densenet250_k24_bc_cifar100 or weight_count == 15480556) assert (model != densenet250_k24_bc_svhn or weight_count == 15324406) if __name__ == "__main__": _test()
30,185
37.16182
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/bninception.py
""" BN-Inception for ImageNet-1K, implemented in TensorFlow. Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. """ __all__ = ['BNInception', 'bninception'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, conv7x7_block, MaxPool2d, AvgPool2d, Concurrent, SimpleSequential,\ flatten, is_channels_first class Inception3x3Branch(nn.Layer): """ BN-Inception 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. strides : int or tuple/list of 2 int, default 1 Strides of the second convolution. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, mid_channels, strides=1, use_bias=True, use_bn=True, data_format="channels_last", **kwargs): super(Inception3x3Branch, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class InceptionDouble3x3Branch(nn.Layer): """ BN-Inception double 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. strides : int or tuple/list of 2 int, default 1 Strides of the second convolution. 
use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, mid_channels, strides=1, use_bias=True, use_bn=True, data_format="channels_last", **kwargs): super(InceptionDouble3x3Branch, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class InceptionPoolBranch(nn.Layer): """ BN-Inception avg-pool branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. avg_pool : bool Whether use average pooling or max pooling. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, avg_pool, use_bias, use_bn, data_format="channels_last", **kwargs): super(InceptionPoolBranch, self).__init__(**kwargs) if avg_pool: self.pool = AvgPool2d( pool_size=3, strides=1, padding=1, ceil_mode=True, # count_include_pad=True, data_format=data_format, name="pool") else: self.pool = MaxPool2d( pool_size=3, strides=1, padding=1, ceil_mode=True, data_format=data_format, name="pool") self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv") def call(self, x, training=None): x = self.pool(x) x = self.conv(x, training=training) return x class StemBlock(nn.Layer): """ BN-Inception stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, mid_channels, use_bias, use_bn, data_format="channels_last", **kwargs): super(StemBlock, self).__init__(**kwargs) self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid_channels, strides=2, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv1") self.pool1 = MaxPool2d( pool_size=3, strides=2, padding=0, ceil_mode=True, data_format=data_format, name="pool1") self.conv2 = Inception3x3Branch( in_channels=mid_channels, out_channels=out_channels, mid_channels=mid_channels, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="conv2") self.pool2 = MaxPool2d( pool_size=3, strides=2, padding=0, ceil_mode=True, data_format=data_format, name="pool2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.pool1(x) x = self.conv2(x, training=training) x = self.pool2(x) return x class InceptionBlock(nn.Layer): """ BN-Inception unit. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. avg_pool : bool Whether use average pooling or max pooling. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, avg_pool, use_bias, use_bn, data_format="channels_last", **kwargs): super(InceptionBlock, self).__init__(**kwargs) assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(conv1x1_block( in_channels=in_channels, out_channels=mid2_channels_list[0], use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch1")) self.branches.children.append(Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch2")) self.branches.children.append(InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch3")) self.branches.children.append(InceptionPoolBranch( in_channels=in_channels, out_channels=mid2_channels_list[3], avg_pool=avg_pool, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch4")) def call(self, x, training=None): x = self.branches(x, training=training) return x class ReductionBlock(nn.Layer): """ BN-Inception reduction block. Parameters: ---------- in_channels : int Number of input channels. mid1_channels_list : list of int Number of pre-middle channels for branches. mid2_channels_list : list of int Number of middle channels for branches. use_bias : bool Whether the convolution layer uses a bias vector. use_bn : bool Whether to use BatchNorm layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, use_bias, use_bn, data_format="channels_last", **kwargs): super(ReductionBlock, self).__init__(**kwargs) assert (len(mid1_channels_list) == 2) assert (len(mid2_channels_list) == 4) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(Inception3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], strides=2, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch1")) self.branches.children.append(InceptionDouble3x3Branch( in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], strides=2, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="branch2")) self.branches.children.append(MaxPool2d( pool_size=3, strides=2, padding=0, ceil_mode=True, data_format=data_format, name="branch3")) def call(self, x, training=None): x = self.branches(x, training=training) return x class BNInception(tf.keras.Model): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels_list : list of int Number of output channels for the initial unit. mid1_channels_list : list of list of list of int Number of pre-middle channels for each unit. mid2_channels_list : list of list of list of int Number of middle channels for each unit. use_bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels_list, mid1_channels_list, mid2_channels_list, use_bias=True, use_bn=True, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(BNInception, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(StemBlock( in_channels=in_channels, out_channels=init_block_channels_list[1], mid_channels=init_block_channels_list[0], use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="init_block")) in_channels = init_block_channels_list[-1] for i, channels_per_stage in enumerate(channels): mid1_channels_list_i = mid1_channels_list[i] mid2_channels_list_i = mid2_channels_list[i] stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): stage.add(ReductionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(j + 1))) else: avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1) stage.add(InceptionBlock( in_channels=in_channels, mid1_channels_list=mid1_channels_list_i[j], mid2_channels_list=mid2_channels_list_i[j], avg_pool=avg_pool, use_bias=use_bias, use_bn=use_bn, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_bninception(model_name=None, pretrained=False, 
root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create BN-Inception model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ init_block_channels_list = [64, 192] channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]] mid1_channels_list = [ [[64, 64], [64, 64]], [[128, 64], # 3c [64, 96], # 4a [96, 96], # 4a [128, 128], # 4c [128, 160]], # 4d [[128, 192], # 4e [192, 160], # 5a [192, 192]], ] mid2_channels_list = [ [[64, 64, 96, 32], [64, 96, 96, 64]], [[0, 160, 96, 0], # 3c [224, 96, 128, 128], # 4a [192, 128, 128, 128], # 4b [160, 160, 160, 128], # 4c [96, 192, 192, 128]], # 4d [[0, 192, 256, 0], # 4e [352, 320, 224, 128], # 5a [352, 320, 224, 128]], ] net = BNInception( channels=channels, init_block_channels_list=init_block_channels_list, mid1_channels_list=mid1_channels_list, mid2_channels_list=mid2_channels_list, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def bninception(**kwargs): """ BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,' https://arxiv.org/abs/1502.03167. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_bninception(model_name="bninception", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ bninception, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != bninception or weight_count == 11295240) if __name__ == "__main__": _test()
19,733
31.726368
117
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/zfnet.py
""" ZFNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. """ __all__ = ['zfnet', 'zfnetb'] import os import tensorflow as tf from .alexnet import AlexNet def get_zfnet(version="a", model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create ZFNet model with specific parameters. Parameters: ---------- version : str, default 'a' Version of ZFNet ('a' or 'b'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if version == "a": channels = [[96], [256], [384, 384, 256]] kernel_sizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] paddings = [[1], [0], [1, 1, 1]] use_lrn = True elif version == "b": channels = [[96], [256], [512, 1024, 512]] kernel_sizes = [[7], [5], [3, 3, 3]] strides = [[2], [2], [1, 1, 1]] paddings = [[1], [0], [1, 1, 1]] use_lrn = True else: raise ValueError("Unsupported ZFNet version {}".format(version)) net = AlexNet( channels=channels, kernel_sizes=kernel_sizes, strides=strides, paddings=paddings, use_lrn=use_lrn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def zfnet(**kwargs): """ ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_zfnet(model_name="zfnet", **kwargs) def zfnetb(**kwargs): """ ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_zfnet(version="b", model_name="zfnetb", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ zfnet, zfnetb, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != zfnet or weight_count == 62357608) assert (model != zfnetb or weight_count == 107627624) if __name__ == "__main__": _test()
3,844
29.275591
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/peleenet.py
""" PeleeNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. """ __all__ = ['PeleeNet', 'peleenet'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, Concurrent, MaxPool2d, AvgPool2d, SimpleSequential, flatten,\ is_channels_first, get_channel_axis class PeleeBranch1(nn.Layer): """ PeleeNet branch type 1 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. strides : int or tuple/list of 2 int, default 1 Strides of the second convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, mid_channels, strides=1, data_format="channels_last", **kwargs): super(PeleeBranch1, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class PeleeBranch2(nn.Layer): """ PeleeNet branch type 2 block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of intermediate channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, mid_channels, data_format="channels_last", **kwargs): super(PeleeBranch2, self).__init__(**kwargs) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class StemBlock(nn.Layer): """ PeleeNet stem block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(StemBlock, self).__init__(**kwargs) mid1_channels = out_channels // 2 mid2_channels = out_channels * 2 self.first_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, data_format=data_format, name="first_conv") self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(PeleeBranch1( in_channels=out_channels, out_channels=out_channels, mid_channels=mid1_channels, strides=2, data_format=data_format, name="branch1")) self.branches.add(MaxPool2d( pool_size=2, strides=2, padding=0, data_format=data_format, name="branch2")) self.last_conv = conv1x1_block( in_channels=mid2_channels, out_channels=out_channels, data_format=data_format, name="last_conv") def call(self, x, training=None): x = self.first_conv(x, training=training) x = self.branches(x, training=training) x = self.last_conv(x, training=training) return x class DenseBlock(nn.Layer): """ PeleeNet dense block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_size : int Bottleneck width. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, bottleneck_size, data_format="channels_last", **kwargs): super(DenseBlock, self).__init__(**kwargs) self.data_format = data_format inc_channels = (out_channels - in_channels) // 2 mid_channels = inc_channels * bottleneck_size self.branch1 = PeleeBranch1( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels, data_format=data_format, name="branch1") self.branch2 = PeleeBranch2( in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels, data_format=data_format, name="branch2") def call(self, x, training=None): x1 = self.branch1(x, training=training) x2 = self.branch2(x, training=training) x = tf.concat([x, x1, x2], axis=get_channel_axis(self.data_format)) return x class TransitionBlock(nn.Layer): """ PeleeNet's transition block, like in DensNet, but with ordinary convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(TransitionBlock, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="conv") self.pool = AvgPool2d( pool_size=2, strides=2, padding=0, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x, training=training) x = self.pool(x) return x class PeleeNet(tf.keras.Model): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. 
Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck_sizes : list of int Bottleneck sizes for each stage. dropout_rate : float, default 0.5 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, bottleneck_sizes, dropout_rate=0.5, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(PeleeNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(StemBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): bottleneck_size = bottleneck_sizes[i] stage = SimpleSequential(name="stage{}".format(i + 1)) if i != 0: stage.add(TransitionBlock( in_channels=in_channels, out_channels=in_channels, data_format=data_format, name="trans{}".format(i + 1))) for j, out_channels in enumerate(channels_per_stage): stage.add(DenseBlock( in_channels=in_channels, out_channels=out_channels, bottleneck_size=bottleneck_size, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(conv1x1_block( in_channels=in_channels, out_channels=in_channels, data_format=data_format, name="final_block")) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") if dropout_rate > 
0.0: self.output1.add(nn.Dropout( rate=dropout_rate, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="fc")) def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_peleenet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create PeleeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ init_block_channels = 32 growth_rate = 32 layers = [3, 4, 8, 6] bottleneck_sizes = [1, 2, 4, 4] from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1]])[1:]], layers, [[init_block_channels]])[1:] net = PeleeNet( channels=channels, init_block_channels=init_block_channels, bottleneck_sizes=bottleneck_sizes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def peleenet(**kwargs): """ PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_peleenet(model_name="peleenet", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ peleenet, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != peleenet or weight_count == 2802248) if __name__ == "__main__": _test()
13,598
30.552204
117
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/ibppose_coco.py
""" IBPPose for COCO Keypoint, implemented in TensorFlow. Original paper: 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. """ __all__ = ['IbpPose', 'ibppose_coco'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import get_activation_layer, MaxPool2d, conv1x1_block, conv3x3_block, conv7x7_block, SEBlock, Hourglass,\ InterpolationBlock, SimpleSequential, is_channels_first, get_channel_axis class IbpResBottleneck(nn.Layer): """ Bottleneck block for residual path in the residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, use_bias=False, bottleneck_factor=2, activation="relu", data_format="channels_last", **kwargs): super(IbpResBottleneck, self).__init__(**kwargs) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_bias=use_bias, activation=activation, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, use_bias=use_bias, activation=activation, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, use_bias=use_bias, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class IbpResUnit(nn.Layer): """ ResNet-like residual unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. use_bias : bool, default False Whether the layer uses a bias vector. bottleneck_factor : int, default 2 Bottleneck factor. activation : function or str or None, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides=1, use_bias=False, bottleneck_factor=2, activation="relu", data_format="channels_last", **kwargs): super(IbpResUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = IbpResBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, bottleneck_factor=bottleneck_factor, activation=activation, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, use_bias=use_bias, activation=None, data_format=data_format, name="identity_conv") self.activ = get_activation_layer(activation) def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class IbpBackbone(nn.Layer): """ IBPPose backbone. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, activation, data_format="channels_last", **kwargs): super(IbpBackbone, self).__init__(**kwargs) self.data_format = data_format dilations = (3, 3, 4, 4, 5, 5) mid1_channels = out_channels // 4 mid2_channels = out_channels // 2 self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid1_channels, strides=2, activation=activation, data_format=data_format, name="conv1") self.res1 = IbpResUnit( in_channels=mid1_channels, out_channels=mid2_channels, activation=activation, data_format=data_format, name="res1") self.pool = MaxPool2d( pool_size=2, strides=2, data_format=data_format, name="pool") self.res2 = IbpResUnit( in_channels=mid2_channels, out_channels=mid2_channels, activation=activation, data_format=data_format, name="res2") self.dilation_branch = SimpleSequential(name="dilation_branch") for i, dilation in enumerate(dilations): self.dilation_branch.add(conv3x3_block( in_channels=mid2_channels, out_channels=mid2_channels, padding=dilation, dilation=dilation, activation=activation, data_format=data_format, name="block{}".format(i + 1))) def call(self, x, training=None): x = self.conv1(x, training=training) x = self.res1(x, training=training) x = self.pool(x, training=training) x = self.res2(x, training=training) y = self.dilation_branch(x, training=training) x = tf.concat([x, y], axis=get_channel_axis(self.data_format)) return x class IbpDownBlock(nn.Layer): """ IBPPose down block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, activation, data_format="channels_last", **kwargs): super(IbpDownBlock, self).__init__(**kwargs) self.down = MaxPool2d( pool_size=2, strides=2, data_format=data_format, name="down") self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation, data_format=data_format, name="res") def call(self, x, training=None): x = self.down(x, training=training) x = self.res(x, training=training) return x class IbpUpBlock(nn.Layer): """ IBPPose up block for the hourglass. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, use_bn, activation, data_format="channels_last", **kwargs): super(IbpUpBlock, self).__init__(**kwargs) self.res = IbpResUnit( in_channels=in_channels, out_channels=out_channels, activation=activation, data_format=data_format, name="res") self.up = InterpolationBlock( scale_factor=2, interpolation="nearest", data_format=data_format, name="up") self.conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation, data_format=data_format, name="conv") def call(self, x, training=None): x = self.res(x, training=training) x = self.up(x, training=training) x = self.conv(x, training=training) return x class MergeBlock(nn.Layer): """ IBPPose merge block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, use_bn, data_format="channels_last", **kwargs): super(MergeBlock, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=None, data_format=data_format, name="conv") def call(self, x, training=None): return self.conv(x, training=training) class IbpPreBlock(nn.Layer): """ IBPPose preliminary decoder block. Parameters: ---------- out_channels : int Number of output channels. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, out_channels, use_bn, activation, data_format="channels_last", **kwargs): super(IbpPreBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, use_bias=(not use_bn), use_bn=use_bn, activation=activation, data_format=data_format, name="conv2") self.se = SEBlock( channels=out_channels, use_conv=False, mid_activation=activation, data_format=data_format, name="se") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.se(x, training=training) return x class IbpPass(nn.Layer): """ IBPPose single pass decoder block. Parameters: ---------- channels : int Number of input/output channels. mid_channels : int Number of middle channels. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, mid_channels, depth, growth_rate, merge, use_bn, activation, data_format="channels_last", **kwargs): super(IbpPass, self).__init__(**kwargs) self.merge = merge down_seq = SimpleSequential(name="down_seq") up_seq = SimpleSequential(name="up_seq") skip_seq = SimpleSequential(name="skip_seq") top_channels = channels bottom_channels = channels for i in range(depth + 1): skip_seq.add(IbpResUnit( in_channels=top_channels, out_channels=top_channels, activation=activation, data_format=data_format, name="skip{}".format(i + 1))) bottom_channels += growth_rate if i < depth: down_seq.add(IbpDownBlock( in_channels=top_channels, out_channels=bottom_channels, activation=activation, data_format=data_format, name="down{}".format(i + 1))) up_seq.add(IbpUpBlock( in_channels=bottom_channels, out_channels=top_channels, use_bn=use_bn, activation=activation, data_format=data_format, name="up{}".format(i + 1))) top_channels = bottom_channels self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, name="hg") self.pre_block = IbpPreBlock( out_channels=channels, use_bn=use_bn, activation=activation, data_format=data_format, name="pre_block") self.post_block = conv1x1_block( in_channels=channels, out_channels=mid_channels, use_bias=True, use_bn=False, activation=None, data_format=data_format, name="post_block") if self.merge: self.pre_merge_block = MergeBlock( in_channels=channels, out_channels=channels, use_bn=use_bn, data_format=data_format, name="pre_merge_block") self.post_merge_block = MergeBlock( in_channels=mid_channels, out_channels=channels, use_bn=use_bn, data_format=data_format, name="post_merge_block") def call(self, x, x_prev, training=None): x = self.hg(x, training=training) if x_prev is not None: x = x + x_prev y = self.pre_block(x, training=training) z = self.post_block(y, training=training) if self.merge: z = self.post_merge_block(z, 
training=training) + self.pre_merge_block(y, training=training) return z class IbpPose(tf.keras.Model): """ IBPPose model from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- passes : int Number of passes. backbone_out_channels : int Number of output channels for the backbone. outs_channels : int Number of output channels for the backbone. depth : int Depth of hourglass. growth_rate : int Addition for number of channel for each level. use_bn : bool Whether to use BatchNorm layer. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 256) Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, passes, backbone_out_channels, outs_channels, depth, growth_rate, use_bn, in_channels=3, in_size=(256, 256), data_format="channels_last", **kwargs): super(IbpPose, self).__init__(**kwargs) self.in_size = in_size self.data_format = data_format activation = nn.LeakyReLU(alpha=0.01) self.backbone = IbpBackbone( in_channels=in_channels, out_channels=backbone_out_channels, activation=activation, data_format=data_format, name="backbone") self.decoder = SimpleSequential(name="decoder") for i in range(passes): merge = (i != passes - 1) self.decoder.add(IbpPass( channels=backbone_out_channels, mid_channels=outs_channels, depth=depth, growth_rate=growth_rate, merge=merge, use_bn=use_bn, activation=activation, data_format=data_format, name="pass{}".format(i + 1))) def call(self, x, training=None): x = self.backbone(x, training=training) x_prev = None for block in self.decoder.children: if x_prev is not None: x = x + x_prev x_prev = block(x, x_prev, training=training) return x_prev def get_ibppose(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create IBPPose model with specific parameters. 
Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ passes = 4 backbone_out_channels = 256 outs_channels = 50 depth = 4 growth_rate = 128 use_bn = True net = IbpPose( passes=passes, backbone_out_channels=backbone_out_channels, outs_channels=outs_channels, depth=depth, growth_rate=growth_rate, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def ibppose_coco(data_format="channels_last", **kwargs): """ IBPPose model for COCO Keypoint from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,' https://arxiv.org/abs/1911.10529. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_ibppose(model_name="ibppose_coco", data_format=data_format, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K # os.environ["TF_CUDNN_DETERMINISTIC"] = "1" # os.environ["TF_DETERMINISTIC_OPS"] = "1" data_format = "channels_last" # data_format = "channels_first" in_size = (256, 256) pretrained = False models = [ ibppose_coco, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) y = net(x) assert (y.shape[0] == batch) if is_channels_first(data_format): assert ((y.shape[1] == 50) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert ((y.shape[3] == 50) and (y.shape[1] == x.shape[1] // 4) and (y.shape[2] == x.shape[2] // 4)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibppose_coco or weight_count == 95827784) if __name__ == "__main__": _test()
22,000
31.402062
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/xception.py
""" Xception for ImageNet-1K, implemented in TensorFlow. Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. """ __all__ = ['Xception', 'xception'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import Conv2d, BatchNorm, MaxPool2d, AvgPool2d, conv1x1_block, conv3x3_block, flatten,\ SimpleSequential, is_channels_first class DwsConv(nn.Layer): """ Depthwise separable convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides=1, padding=0, data_format="channels_last", **kwargs): super(DwsConv, self).__init__(**kwargs) self.dw_conv = Conv2d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, strides=strides, padding=padding, groups=in_channels, use_bias=False, data_format=data_format, name="dw_conv") self.pw_conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_bias=False, data_format=data_format, name="pw_conv") def call(self, x, training=None): x = self.dw_conv(x) x = self.pw_conv(x) return x class DwsConvBlock(nn.Layer): """ Depthwise separable convolution block with batchnorm and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. 
activate : bool Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, activate, data_format="channels_last", **kwargs): super(DwsConvBlock, self).__init__(**kwargs) self.activate = activate if self.activate: self.activ = nn.ReLU() self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, name="conv") self.bn = BatchNorm( data_format=data_format, name="bn") def call(self, x, training=None): if self.activate: x = self.activ(x) x = self.conv(x) x = self.bn(x, training=training) return x def dws_conv3x3_block(in_channels, out_channels, activate, data_format="channels_last", **kwargs): """ 3x3 version of the depthwise separable convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=1, padding=1, activate=activate, data_format=data_format, **kwargs) class XceptionUnit(nn.Layer): """ Xception unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the downsample polling. reps : int Number of repetitions. start_with_relu : bool, default True Whether start with ReLU activation. grow_first : bool, default True Whether start from growing. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, reps, start_with_relu=True, grow_first=True, data_format="channels_last", **kwargs): super(XceptionUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.body = SimpleSequential(name="body") for i in range(reps): if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)): in_channels_i = in_channels out_channels_i = out_channels else: if grow_first: in_channels_i = out_channels out_channels_i = out_channels else: in_channels_i = in_channels out_channels_i = in_channels activate = start_with_relu if (i == 0) else True self.body.children.append(dws_conv3x3_block( in_channels=in_channels_i, out_channels=out_channels_i, activate=activate, data_format=data_format, name="block{}".format(i + 1))) if strides != 1: self.body.children.append(MaxPool2d( pool_size=3, strides=strides, padding=1, data_format=data_format, name="pool")) def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = tf.identity(x) x = self.body(x, training=training) x = x + identity return x class XceptionInitBlock(nn.Layer): """ Xception specific initial block. Parameters: ---------- in_channels : int Number of input channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, data_format="channels_last", **kwargs): super(XceptionInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, strides=2, padding=0, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=32, out_channels=64, strides=1, padding=0, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class XceptionFinalBlock(nn.Layer): """ Xception specific final block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(XceptionFinalBlock, self).__init__(**kwargs) self.conv1 = dws_conv3x3_block( in_channels=1024, out_channels=1536, activate=False, data_format=data_format, name="conv1") self.conv2 = dws_conv3x3_block( in_channels=1536, out_channels=2048, activate=True, data_format=data_format, name="conv2") self.activ = nn.ReLU() self.pool = AvgPool2d( pool_size=10, strides=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.activ(x) x = self.pool(x) return x class Xception(tf.keras.Model): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- channels : list of list of int Number of output channels for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, in_channels=3, in_size=(299, 299), classes=1000, data_format="channels_last", **kwargs): super(Xception, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(XceptionInitBlock( in_channels=in_channels, data_format=data_format, name="init_block")) in_channels = 64 for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): stage.add(XceptionUnit( in_channels=in_channels, out_channels=out_channels, strides=(2 if (j == 0) else 1), reps=(2 if (j == 0) else 3), start_with_relu=((i != 0) or (j != 0)), grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1)), data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(XceptionFinalBlock( data_format=data_format, name="final_block")) self.output1 = nn.Dense( units=classes, input_dim=2048, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_xception(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Xception model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" channels = [[128], [256], [728] * 9, [1024]] net = Xception( channels=channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def xception(**kwargs): """ Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_xception(model_name="xception", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ xception, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != xception or weight_count == 22855952) if __name__ == "__main__": _test()
14,191
30.191209
118
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/darknet53.py
""" DarkNet-53 for ImageNet-1K, implemented in TensorFlow. Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. """ __all__ = ['DarkNet53', 'darknet53'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten class DarkUnit(nn.Layer): """ DarkNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. alpha : float Slope coefficient for Leaky ReLU activation. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, alpha, data_format="channels_last", **kwargs): super(DarkUnit, self).__init__(**kwargs) assert (out_channels % 2 == 0) mid_channels = out_channels // 2 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, name="conv2") def call(self, x, training=None): identity = x x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x + identity class DarkNet53(tf.keras.Model): """ DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. alpha : float, default 0.1 Slope coefficient for Leaky ReLU activation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, alpha=0.1, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(DarkNet53, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if j == 0: stage.add(conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, name="unit{}".format(j + 1))) else: stage.add(DarkUnit( in_channels=in_channels, out_channels=out_channels, alpha=alpha, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_darknet53(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DarkNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" init_block_channels = 32 layers = [2, 3, 9, 9, 5] channels_per_layers = [64, 128, 256, 512, 1024] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = DarkNet53( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def darknet53(**kwargs): """ DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_darknet53(model_name="darknet53", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ darknet53, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != darknet53 or weight_count == 41609928) if __name__ == "__main__": _test()
7,225
31.54955
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/mobilenet.py
""" MobileNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. """ __all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'get_mobilenet'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv3x3_block, dwsconv3x3_block, SimpleSequential, flatten class MobileNet(tf.keras.Model): """ MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- channels : list of list of int Number of output channels for each unit. first_stage_stride : bool Whether stride is used at the first stage. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). dw_activation : function or str or None, default 'relu' Activation function after the depthwise convolution block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, first_stage_stride, dw_use_bn=True, dw_activation="relu", in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(MobileNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") init_block_channels = channels[0][0] self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, strides=2, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels[1:]): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1 stage.add(dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=strides, dw_use_bn=dw_use_bn, dw_activation=dw_activation, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_mobilenet(width_scale, dws_simplified=False, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create MobileNet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. dws_simplified : bool, default False Whether to use simplified depthwise separable convolution block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]] first_stage_stride = False if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if dws_simplified: dw_use_bn = False dw_activation = None else: dw_use_bn = True dw_activation = "relu" net = MobileNet( channels=channels, first_stage_stride=first_stage_stride, dw_use_bn=dw_use_bn, dw_activation=dw_activation, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def mobilenet_w1(**kwargs): """ 1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=1.0, model_name="mobilenet_w1", **kwargs) def mobilenet_w3d4(**kwargs): """ 0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_mobilenet(width_scale=0.75, model_name="mobilenet_w3d4", **kwargs) def mobilenet_wd2(**kwargs): """ 0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.5, model_name="mobilenet_wd2", **kwargs) def mobilenet_wd4(**kwargs): """ 0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.25, model_name="mobilenet_wd4", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ mobilenet_w1, mobilenet_w3d4, mobilenet_wd2, mobilenet_wd4, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenet_w1 or weight_count == 4231976) assert (model != mobilenet_w3d4 or weight_count == 2585560) assert (model != mobilenet_wd2 or weight_count == 1331592) assert (model != mobilenet_wd4 or weight_count == 470072) if __name__ == "__main__": _test()
8,450
33.493878
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/dpn.py
""" DPN for ImageNet-1K, implemented in TensorFlow. Original paper: 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. """ __all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn98', 'dpn107', 'dpn131'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, GlobalAvgPool2d, BatchNorm, Conv2d, conv1x1, DualPathSequential, SimpleSequential,\ flatten, is_channels_first, get_channel_axis class GlobalAvgMaxPool2D(nn.Layer): """ Global average+max pooling operation for spatial data. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(GlobalAvgMaxPool2D, self).__init__(**kwargs) self.axis = get_channel_axis(data_format) self.avg_pool = nn.GlobalAvgPool2D( data_format=data_format, name="avg_pool") self.max_pool = nn.GlobalMaxPool2D( data_format=data_format, name="max_pool") def call(self, x, training=None): x_avg = self.avg_pool(x) x_max = self.max_pool(x) x = 0.5 * (x_avg + x_max) x = tf.expand_dims(tf.expand_dims(x, axis=self.axis), axis=self.axis) return x def dpn_batch_norm(channels, data_format="channels_last", **kwargs): """ DPN specific Batch normalization layer. Parameters: ---------- channels : int Number of channels in input data. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ assert (channels is not None) return BatchNorm( epsilon=0.001, data_format=data_format, **kwargs) class PreActivation(nn.Layer): """ DPN specific block, which performs the preactivation like in RreResNet. Parameters: ---------- channels : int Number of channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, data_format="channels_last", **kwargs): super(PreActivation, self).__init__(**kwargs) self.bn = dpn_batch_norm( channels=channels, data_format=data_format, name="bn") self.activ = nn.ReLU() def call(self, x, training=None): x = self.bn(x, training=training) x = self.activ(x) return x class DPNConv(nn.Layer): """ DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, groups, data_format="channels_last", **kwargs): super(DPNConv, self).__init__(**kwargs) self.bn = dpn_batch_norm( channels=in_channels, data_format=data_format, name="bn") self.activ = nn.ReLU() self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, groups=groups, use_bias=False, data_format=data_format, name="conv") def call(self, x, training=None): x = self.bn(x, training=training) x = self.activ(x) x = self.conv(x) return x def dpn_conv1x1(in_channels, out_channels, strides=1, data_format="channels_last", **kwargs): """ 1x1 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int, default 1 Strides of the convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return DPNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=0, groups=1, data_format=data_format, **kwargs) def dpn_conv3x3(in_channels, out_channels, strides, groups, data_format="channels_last", **kwargs): """ 3x3 version of the DPN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. groups : int Number of groups. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return DPNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=1, groups=groups, data_format=data_format, **kwargs) class DPNUnit(nn.Layer): """ DPN unit. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of intermediate channels. bw : int Number of residual channels. inc : int Incrementing step for channels. groups : int Number of groups in the units. has_proj : bool Whether to use projection. key_strides : int Key strides of the convolutions. b_case : bool, default False Whether to use B-case model. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid_channels, bw, inc, groups, has_proj, key_strides, b_case=False, data_format="channels_last", **kwargs): super(DPNUnit, self).__init__(**kwargs) self.bw = bw self.has_proj = has_proj self.b_case = b_case self.data_format = data_format if self.has_proj: self.conv_proj = dpn_conv1x1( in_channels=in_channels, out_channels=bw + 2 * inc, strides=key_strides, data_format=data_format, name="conv_proj") self.conv1 = dpn_conv1x1( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = dpn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, strides=key_strides, groups=groups, data_format=data_format, name="conv2") if b_case: self.preactiv = PreActivation( channels=mid_channels, data_format=data_format, name="preactiv") self.conv3a = conv1x1( in_channels=mid_channels, out_channels=bw, data_format=data_format, name="conv3a") self.conv3b = conv1x1( in_channels=mid_channels, out_channels=inc, data_format=data_format, name="conv3b") else: self.conv3 = dpn_conv1x1( in_channels=mid_channels, out_channels=bw + inc, data_format=data_format, name="conv3") def call(self, x1, x2=None, training=None): axis = get_channel_axis(self.data_format) x_in = tf.concat([x1, x2], axis=axis) if x2 is not None else x1 if self.has_proj: x_s = self.conv_proj(x_in, training=training) channels = (x_s.get_shape().as_list())[axis] x_s1, x_s2 = tf.split(x_s, num_or_size_splits=[self.bw, channels - self.bw], axis=axis) # x_s1 = F.slice_axis(x_s, axis=1, begin=0, end=self.bw) # x_s2 = F.slice_axis(x_s, axis=1, begin=self.bw, end=None) else: assert (x2 is not None) x_s1 = x1 x_s2 = x2 x_in = self.conv1(x_in, training=training) x_in = self.conv2(x_in, training=training) if self.b_case: x_in = self.preactiv(x_in, training=training) y1 = self.conv3a(x_in, training=training) y2 = self.conv3b(x_in, training=training) else: x_in = self.conv3(x_in, training=training) # y1 = F.slice_axis(x_in, axis=1, begin=0, end=self.bw) 
# y2 = F.slice_axis(x_in, axis=1, begin=self.bw, end=None) channels = (x_in.get_shape().as_list())[axis] y1, y2 = tf.split(x_in, num_or_size_splits=[self.bw, channels - self.bw], axis=axis) residual = x_s1 + y1 dense = tf.concat([x_s2, y2], axis=axis) return residual, dense class DPNInitBlock(nn.Layer): """ DPN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int or tuple/list of 2 int Padding value for convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, padding, data_format="channels_last", **kwargs): super(DPNInitBlock, self).__init__(**kwargs) self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=2, padding=padding, use_bias=False, data_format=data_format, name="conv") self.bn = dpn_batch_norm( channels=out_channels, data_format=data_format, name="bn") self.activ = nn.ReLU() self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x) x = self.bn(x, training=training) x = self.activ(x) x = self.pool(x) return x class DPNFinalBlock(nn.Layer): """ DPN final block, which performs the preactivation with cutting. Parameters: ---------- channels : int Number of channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, data_format="channels_last", **kwargs): super(DPNFinalBlock, self).__init__(**kwargs) self.data_format = data_format self.activ = PreActivation( channels=channels, data_format=data_format, name="activ") def call(self, x1, x2, training=None): assert (x2 is not None) x = tf.concat([x1, x2], axis=get_channel_axis(self.data_format)) x = self.activ(x) return x, None class DPN(tf.keras.Model): """ DPN model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_block_kernel_size : int or tuple/list of 2 int Convolution window size for the initial unit. init_block_padding : int or tuple/list of 2 int Padding value for convolution layer in the initial unit. rs : list f int Number of intermediate channels for each unit. bws : list f int Number of residual channels for each unit. incs : list f int Incrementing step for channels for each unit. groups : int Number of groups in the units. b_case : bool Whether to use B-case model. for_training : bool Whether to use model for training. test_time_pool : bool Whether to use the avg-max pooling in the inference mode. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, init_block_kernel_size, init_block_padding, rs, bws, incs, groups, b_case, for_training, test_time_pool, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(DPN, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0, name="features") self.features.children.append(DPNInitBlock( in_channels=in_channels, out_channels=init_block_channels, kernel_size=init_block_kernel_size, padding=init_block_padding, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(name="stage{}".format(i + 1)) r = rs[i] bw = bws[i] inc = incs[i] for j, out_channels in enumerate(channels_per_stage): has_proj = (j == 0) key_strides = 2 if (j == 0) and (i != 0) else 1 stage.children.append(DPNUnit( in_channels=in_channels, mid_channels=r, bw=bw, inc=inc, groups=groups, has_proj=has_proj, key_strides=key_strides, b_case=b_case, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.children.append(stage) self.features.children.append(DPNFinalBlock( channels=in_channels, data_format=data_format, name="final_block")) self.output1 = SimpleSequential(name="output1") if for_training or not test_time_pool: self.output1.add(GlobalAvgPool2d( data_format=data_format, name="final_pool")) self.output1.add(conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True, data_format=data_format, name="classifier")) else: self.output1.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="avg_pool")) self.output1.add(conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True, data_format=data_format, name="classifier")) self.output1.add(GlobalAvgMaxPool2D( data_format=data_format, name="avgmax_pool")) 
def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x) x = flatten(x, self.data_format) return x def get_dpn(num_layers, b_case=False, for_training=False, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DPN model with specific parameters. Parameters: ---------- num_layers : int Number of layers. b_case : bool, default False Whether to use B-case model. for_training : bool Whether to use model for training. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if num_layers == 68: init_block_channels = 10 init_block_kernel_size = 3 init_block_padding = 1 bw_factor = 1 k_r = 128 groups = 32 k_sec = (3, 4, 12, 3) incs = (16, 32, 32, 64) test_time_pool = True elif num_layers == 98: init_block_channels = 96 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (3, 6, 20, 3) incs = (16, 32, 32, 128) test_time_pool = True elif num_layers == 107: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 200 groups = 50 k_sec = (4, 8, 20, 3) incs = (20, 64, 64, 128) test_time_pool = True elif num_layers == 131: init_block_channels = 128 init_block_kernel_size = 7 init_block_padding = 3 bw_factor = 4 k_r = 160 groups = 40 k_sec = (4, 8, 28, 3) incs = (16, 32, 32, 128) test_time_pool = True else: raise ValueError("Unsupported DPN version with number of layers {}".format(num_layers)) channels = [[0] * li for li in k_sec] rs = [0 * li for li in k_sec] bws = [0 * li for li in k_sec] for i in range(len(k_sec)): rs[i] = (2 ** i) * k_r bws[i] = (2 ** i) * 64 * bw_factor inc = incs[i] channels[i][0] = bws[i] + 3 * inc for j in range(1, k_sec[i]): channels[i][j] = channels[i][j - 1] + inc net = DPN( 
channels=channels, init_block_channels=init_block_channels, init_block_kernel_size=init_block_kernel_size, init_block_padding=init_block_padding, rs=rs, bws=bws, incs=incs, groups=groups, b_case=b_case, for_training=for_training, test_time_pool=test_time_pool, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def dpn68(**kwargs): """ DPN-68 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=False, model_name="dpn68", **kwargs) def dpn68b(**kwargs): """ DPN-68b model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dpn(num_layers=68, b_case=True, model_name="dpn68b", **kwargs) def dpn98(**kwargs): """ DPN-98 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_dpn(num_layers=98, b_case=False, model_name="dpn98", **kwargs) def dpn107(**kwargs): """ DPN-107 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dpn(num_layers=107, b_case=False, model_name="dpn107", **kwargs) def dpn131(**kwargs): """ DPN-131 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dpn(num_layers=131, b_case=False, model_name="dpn131", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ dpn68, dpn68b, dpn98, dpn107, dpn131, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dpn68 or weight_count == 12611602) assert (model != dpn68b or weight_count == 12611602) assert (model != dpn98 or weight_count == 61570728) assert (model != dpn107 or weight_count == 86917800) assert (model != dpn131 or weight_count == 79254504) if __name__ == "__main__": _test()
23,478
30.056878
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/sknet.py
""" SKNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. """ __all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, SimpleSequential, flatten, is_channels_first,\ get_channel_axis from .resnet import ResInitBlock class SKConvBlock(nn.Layer): """ SKNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. groups : int, default 32 Number of groups in branches. num_branches : int, default 2 Number of branches (`M` parameter in the paper). reduction : int, default 16 Reduction value for intermediate channels (`r` parameter in the paper). min_channels : int, default 32 Minimal number of intermediate channels (`L` parameter in the paper). data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, groups=32, num_branches=2, reduction=16, min_channels=32, data_format="channels_last", **kwargs): super(SKConvBlock, self).__init__(**kwargs) self.num_branches = num_branches self.out_channels = out_channels self.data_format = data_format self.axis = get_channel_axis(data_format) mid_channels = max(in_channels // reduction, min_channels) self.branches = Concurrent( stack=True, data_format=data_format, name="branches") for i in range(num_branches): dilation = 1 + i self.branches.children.append(conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=strides, padding=dilation, dilation=dilation, groups=groups, data_format=data_format, name="branch{}".format(i + 2))) self.pool = nn.GlobalAveragePooling2D( data_format=data_format, name="pool") self.fc1 = conv1x1_block( in_channels=out_channels, out_channels=mid_channels, data_format=data_format, name="fc1") self.fc2 = conv1x1( in_channels=mid_channels, out_channels=(out_channels * num_branches), data_format=data_format, name="fc2") self.softmax = nn.Softmax(axis=self.axis) def call(self, x, training=None): y = self.branches(x) u = tf.math.reduce_sum(y, axis=self.axis) s = self.pool(u) if is_channels_first(self.data_format): s = tf.expand_dims(tf.expand_dims(s, 2), 3) else: s = tf.expand_dims(tf.expand_dims(s, 1), 2) z = self.fc1(s) w = self.fc2(z) if is_channels_first(self.data_format): w = tf.reshape(w, shape=(-1, self.num_branches, self.out_channels)) else: w = tf.reshape(w, shape=(-1, self.out_channels, self.num_branches)) w = self.softmax(w) if is_channels_first(self.data_format): w = tf.expand_dims(tf.expand_dims(w, 3), 4) else: w = tf.expand_dims(tf.expand_dims(w, 1), 2) y = y * w y = tf.math.reduce_sum(y, axis=self.axis) return y class SKNetBottleneck(nn.Layer): """ SKNet bottleneck block for residual path in SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
strides : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 2 Bottleneck factor. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, strides, bottleneck_factor=2, data_format="channels_last", **kwargs): super(SKNetBottleneck, self).__init__(**kwargs) mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = SKConvBlock( in_channels=mid_channels, out_channels=mid_channels, strides=strides, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class SKNetUnit(nn.Layer): """ SKNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, data_format="channels_last", **kwargs): super(SKNetUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = SKNetBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, strides=strides, activation=None, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class SKNet(tf.keras.Model): """ SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(SKNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(ResInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(SKNetUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_sknet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create SKNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SKNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def sknet50(**kwargs): """ SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sknet(blocks=50, model_name="sknet50", **kwargs) def sknet101(**kwargs): """ SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sknet(blocks=101, model_name="sknet101", **kwargs) def sknet152(**kwargs): """ SKNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_sknet(blocks=152, model_name="sknet152", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ sknet50, sknet101, sknet152, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sknet50 or weight_count == 27479784) assert (model != sknet101 or weight_count == 48736040) assert (model != sknet152 or weight_count == 66295656) if __name__ == "__main__": _test()
13,222
31.09466
117
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/spnasnet.py
""" Single-Path NASNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. """ __all__ = ['SPNASNet', 'spnasnet'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SimpleSequential, flatten,\ is_channels_first class SPNASUnit(nn.Layer): """ Single-Path NASNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int Expansion factor for each unit. use_skip : bool, default True Whether to use skip connection. activation : str, default 'relu' Activation function or name of activation function. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, use_kernel3, exp_factor, use_skip=True, activation="relu", data_format="channels_last", **kwargs): super(SPNASUnit, self).__init__(**kwargs) assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (strides == 1) and use_skip self.use_exp_conv = exp_factor > 1 mid_channels = exp_factor * in_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation, data_format=data_format, name="exp_conv") if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, data_format=data_format, name="conv1") else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv2") def call(self, x, training=None): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x, training=training) x = self.conv1(x, training=training) x = self.conv2(x, training=training) if self.residual: x = x + identity return x class SPNASInitBlock(nn.Layer): """ Single-Path NASNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, mid_channels, data_format="channels_last", **kwargs): super(SPNASInitBlock, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, strides=2, data_format=data_format, name="conv1") self.conv2 = SPNASUnit( in_channels=mid_channels, out_channels=out_channels, strides=1, use_kernel3=True, exp_factor=1, use_skip=False, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class SPNASFinalBlock(nn.Layer): """ Single-Path NASNet specific final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, mid_channels, data_format="channels_last", **kwargs): super(SPNASFinalBlock, self).__init__(**kwargs) self.conv1 = SPNASUnit( in_channels=in_channels, out_channels=mid_channels, strides=1, use_kernel3=True, exp_factor=6, use_skip=False, data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class SPNASNet(tf.keras.Model): """ Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : list of 2 int Number of output channels for the initial unit. final_block_channels : list of 2 int Number of output channels for the final block of the feature extractor. 
kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(SPNASNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(SPNASInitBlock( in_channels=in_channels, out_channels=init_block_channels[1], mid_channels=init_block_channels[0], data_format=data_format, name="init_block")) in_channels = init_block_channels[1] for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if ((j == 0) and (i != 3)) or \ ((j == len(channels_per_stage) // 2) and (i == 3)) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] stage.add(SPNASUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, use_kernel3=use_kernel3, exp_factor=exp_factor, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(SPNASFinalBlock( in_channels=in_channels, out_channels=final_block_channels[1], mid_channels=final_block_channels[0], data_format=data_format, name="final_block")) in_channels = final_block_channels[1] self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def 
call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_spnasnet(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Single-Path NASNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80], [96, 96, 96, 96, 192, 192, 192, 192]] kernels3 = [[1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]] exp_factors = [[3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 6, 6, 6]] net = SPNASNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def spnasnet(**kwargs): """ Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,' https://arxiv.org/abs/1904.02877. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_spnasnet(model_name="spnasnet", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ spnasnet, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != spnasnet or weight_count == 4421616) if __name__ == "__main__": _test()
12,190
32.491758
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/fastscnn.py
""" Fast-SCNN for image segmentation, implemented in TensorFlow. Original paper: 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. """ __all__ = ['FastSCNN', 'fastscnn_cityscapes'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwsconv3x3_block, Concurrent,\ InterpolationBlock, SimpleSequential, Identity, get_im_size, is_channels_first class Stem(nn.Layer): """ Fast-SCNN specific stem block. Parameters: ---------- in_channels : int Number of input channels. channels : tuple/list of 3 int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, channels, data_format="channels_last", **kwargs): super(Stem, self).__init__(**kwargs) assert (len(channels) == 3) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=channels[0], strides=2, padding=0, data_format=data_format, name="conv1") self.conv2 = dwsconv3x3_block( in_channels=channels[0], out_channels=channels[1], strides=2, data_format=data_format, name="conv2") self.conv3 = dwsconv3x3_block( in_channels=channels[1], out_channels=channels[2], strides=2, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class LinearBottleneck(nn.Layer): """ Fast-SCNN specific Linear Bottleneck layer from MobileNetV2. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the second convolution layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, data_format="channels_last", **kwargs): super(LinearBottleneck, self).__init__(**kwargs) self.residual = (in_channels == out_channels) and (strides == 1) mid_channels = in_channels * 6 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, strides=strides, data_format=data_format, name="conv2") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None, data_format=data_format, name="conv3") def call(self, x, training=None): if self.residual: identity = x x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) if self.residual: x = x + identity return x class FeatureExtractor(nn.Layer): """ Fast-SCNN specific feature extractor/encoder. Parameters: ---------- in_channels : int Number of input channels. channels : list of list of int Number of output channels for each unit. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, channels, data_format="channels_last", **kwargs): super(FeatureExtractor, self).__init__(**kwargs) self.features = SimpleSequential(name="features") for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != len(channels) - 1) else 1 stage.add(LinearBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) def call(self, x, training=None): x = self.features(x, training=training) return x class PoolingBranch(nn.Layer): """ Fast-SCNN specific pooling branch. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. in_size : tuple of 2 int or None Spatial size of input image. down_size : int Spatial size of downscaled image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, in_size, down_size, data_format="channels_last", **kwargs): super(PoolingBranch, self).__init__(**kwargs) self.in_size = in_size self.down_size = down_size self.data_format = data_format self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="conv") self.up = InterpolationBlock( scale_factor=None, out_size=in_size, data_format=data_format, name="up") def call(self, x, training=None): in_size = self.in_size if self.in_size is not None else get_im_size(x, data_format=self.data_format) x = nn.AveragePooling2D(pool_size=(in_size[0] // self.down_size, in_size[1] // self.down_size), strides=1, data_format=self.data_format, name="pool")(x) x = self.conv(x, training=training) x = self.up(x, in_size) return x class FastPyramidPooling(nn.Layer): """ Fast-SCNN specific fast pyramid pooling block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of 2 int or None Spatial size of input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs): super(FastPyramidPooling, self).__init__(**kwargs) down_sizes = [1, 2, 3, 6] mid_channels = in_channels // 4 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.add(Identity(name="branch1")) for i, down_size in enumerate(down_sizes): self.branches.add(PoolingBranch( in_channels=in_channels, out_channels=mid_channels, in_size=in_size, down_size=down_size, data_format=data_format, name="branch{}".format(i + 2))) self.conv = conv1x1_block( in_channels=(in_channels * 2), out_channels=out_channels, data_format=data_format, name="conv") def call(self, x, training=None): x = self.branches(x, training=training) x = self.conv(x, training=training) return x class FeatureFusion(nn.Layer): """ Fast-SCNN specific feature fusion block. Parameters: ---------- x_in_channels : int Number of high resolution (x) input channels. y_in_channels : int Number of low resolution (y) input channels. out_channels : int Number of output channels. x_in_size : tuple of 2 int or None Spatial size of high resolution (x) input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, x_in_channels, y_in_channels, out_channels, x_in_size, data_format="channels_last", **kwargs): super(FeatureFusion, self).__init__(**kwargs) self.x_in_size = x_in_size self.data_format = data_format self.up = InterpolationBlock( scale_factor=None, out_size=x_in_size, data_format=data_format, name="up") self.low_dw_conv = dwconv3x3_block( in_channels=y_in_channels, out_channels=out_channels, data_format=data_format, name="low_dw_conv") self.low_pw_conv = conv1x1_block( in_channels=out_channels, out_channels=out_channels, use_bias=True, activation=None, data_format=data_format, name="low_pw_conv") self.high_conv = conv1x1_block( in_channels=x_in_channels, out_channels=out_channels, use_bias=True, activation=None, data_format=data_format, name="high_conv") self.activ = nn.ReLU() def call(self, x, y, training=None): x_in_size = self.x_in_size if self.x_in_size is not None else get_im_size(x, data_format=self.data_format) y = self.up(y, x_in_size) y = self.low_dw_conv(y, training=training) y = self.low_pw_conv(y, training=training) x = self.high_conv(x, training=training) out = x + y return self.activ(out) class Head(nn.Layer): """ Fast-SCNN head (classifier) block. Parameters: ---------- in_channels : int Number of input channels. classes : int Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, classes, data_format="channels_last", **kwargs): super(Head, self).__init__(**kwargs) self.conv1 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels, data_format=data_format, name="conv1") self.conv2 = dwsconv3x3_block( in_channels=in_channels, out_channels=in_channels, data_format=data_format, name="conv2") self.dropout = nn.Dropout( rate=0.1, name="dropout") self.conv3 = conv1x1( in_channels=in_channels, out_channels=classes, use_bias=True, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.dropout(x, training=training) x = self.conv3(x) return x class AuxHead(nn.Layer): """ Fast-SCNN auxiliary (after stem) head (classifier) block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. classes : int Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, mid_channels, classes, data_format="channels_last", **kwargs): super(AuxHead, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, data_format=data_format, name="conv1") self.dropout = nn.Dropout( rate=0.1, name="dropout") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=classes, use_bias=True, data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.dropout(x, training=training) x = self.conv2(x) return x class FastSCNN(tf.keras.Model): """ Fast-SCNN from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. Parameters: ---------- aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (1024, 1024) Spatial size of the expected input image. classes : int, default 19 Number of segmentation classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, aux=False, fixed_size=True, in_channels=3, in_size=(1024, 1024), classes=19, data_format="channels_last", **kwargs): super(FastSCNN, self).__init__(**kwargs) assert (in_channels > 0) assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0)) self.in_size = in_size self.classes = classes self.aux = aux self.fixed_size = fixed_size self.data_format = data_format steam_channels = [32, 48, 64] self.stem = Stem( in_channels=in_channels, channels=steam_channels, data_format=data_format, name="stem") in_channels = steam_channels[-1] feature_channels = [[64, 64, 64], [96, 96, 96], [128, 128, 128]] self.features = FeatureExtractor( in_channels=in_channels, channels=feature_channels, data_format=data_format, name="features") pool_out_size = (in_size[0] // 32, in_size[1] // 32) if fixed_size else None self.pool = FastPyramidPooling( in_channels=feature_channels[-1][-1], out_channels=feature_channels[-1][-1], in_size=pool_out_size, data_format=data_format, name="pool") fusion_out_size = (in_size[0] // 8, in_size[1] // 8) if fixed_size else None fusion_out_channels = 128 self.fusion = FeatureFusion( x_in_channels=steam_channels[-1], y_in_channels=feature_channels[-1][-1], out_channels=fusion_out_channels, x_in_size=fusion_out_size, data_format=data_format, name="fusion") self.head = Head( in_channels=fusion_out_channels, classes=classes, data_format=data_format, name="head") self.up = InterpolationBlock( scale_factor=None, out_size=in_size, data_format=data_format, name="up") if self.aux: self.aux_head = AuxHead( in_channels=64, mid_channels=64, classes=classes, data_format=data_format, name="aux_head") def call(self, x, training=None): in_size = self.in_size if self.fixed_size else get_im_size(x, 
data_format=self.data_format) x = self.stem(x, training=training) y = self.features(x, training=training) y = self.pool(y, training=training) y = self.fusion(x, y, training=training) y = self.head(y, training=training) y = self.up(y, in_size) if self.aux: x = self.aux_head(x, training=training) x = self.up(x, in_size) return y, x return y def get_fastscnn(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Fast-SCNN model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ net = FastSCNN( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root), by_name=True, skip_mismatch=True) return net def fastscnn_cityscapes(classes=19, aux=True, **kwargs): """ Fast-SCNN model for Cityscapes from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502. Parameters: ---------- classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_fastscnn(classes=classes, aux=aux, model_name="fastscnn_cityscapes", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" in_size = (1024, 2048) aux = True fixed_size = False pretrained = True models = [ (fastscnn_cityscapes, 19), ] for model, classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else (batch, in_size[0], in_size[1], 3)) ys = net(x) y = ys[0] if aux else ys assert (y.shape[0] == x.shape[0]) if is_channels_first(data_format): assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3])) else: assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2])) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != fastscnn_cityscapes or weight_count == 1176278) else: assert (model != fastscnn_cityscapes or weight_count == 1138051) if __name__ == "__main__": _test()
19,829
31.831126
116
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/darknet.py
""" DarkNet for ImageNet-1K, implemented in TensorFlow. Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. """ __all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import Conv2d, conv1x1_block, conv3x3_block, MaxPool2d, SimpleSequential, flatten def dark_convYxY(in_channels, out_channels, alpha, pointwise, data_format="channels_last", **kwargs): """ DarkNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. alpha : float Slope coefficient for Leaky ReLU activation. pointwise : bool Whether use 1x1 (pointwise) convolution or 3x3 convolution. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ if pointwise: return conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, **kwargs) else: return conv3x3_block( in_channels=in_channels, out_channels=out_channels, activation=nn.LeakyReLU(alpha=alpha), data_format=data_format, **kwargs) class DarkNet(tf.keras.Model): """ DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- channels : list of list of int Number of output channels for each unit. odd_pointwise : bool Whether pointwise convolution layer is used for each odd unit. avg_pool_size : int Window size of the final average pooling. cls_activ : bool Whether classification convolution layer uses an activation. alpha : float, default 0.1 Slope coefficient for Leaky ReLU activation. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, odd_pointwise, avg_pool_size, cls_activ, alpha=0.1, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(DarkNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): stage.add(dark_convYxY( in_channels=in_channels, out_channels=out_channels, alpha=alpha, pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise), data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels if i != len(channels) - 1: stage.add(MaxPool2d( pool_size=2, strides=2, data_format=data_format, name="pool{}".format(i + 1))) self.features.add(stage) self.output1 = SimpleSequential(name="output1") self.output1.add(Conv2d( in_channels=in_channels, out_channels=classes, kernel_size=1, data_format=data_format, name="final_conv")) if cls_activ: self.output1.add(nn.LeakyReLU(alpha=alpha)) self.output1.add(nn.AveragePooling2D( pool_size=avg_pool_size, strides=1, data_format=data_format, name="final_pool")) def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x) x = flatten(x, self.data_format) return x def get_darknet(version, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DarkNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('ref', 'tiny' or '19'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if version == 'ref': channels = [[16], [32], [64], [128], [256], [512], [1024]] odd_pointwise = False avg_pool_size = 3 cls_activ = True elif version == 'tiny': channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]] odd_pointwise = True avg_pool_size = 14 cls_activ = False elif version == '19': channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512], [1024, 512, 1024, 512, 1024]] odd_pointwise = False avg_pool_size = 7 cls_activ = False else: raise ValueError("Unsupported DarkNet version {}".format(version)) net = DarkNet( channels=channels, odd_pointwise=odd_pointwise, avg_pool_size=avg_pool_size, cls_activ=cls_activ, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def darknet_ref(**kwargs): """ DarkNet 'Reference' model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_darknet(version="ref", model_name="darknet_ref", **kwargs) def darknet_tiny(**kwargs): """ DarkNet Tiny model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_darknet(version="tiny", model_name="darknet_tiny", **kwargs) def darknet19(**kwargs): """ DarkNet-19 model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_darknet(version="19", model_name="darknet19", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ darknet_ref, darknet_tiny, darknet19, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != darknet_ref or weight_count == 7319416) assert (model != darknet_tiny or weight_count == 1042104) assert (model != darknet19 or weight_count == 20842376) if __name__ == "__main__": _test()
8,916
32.148699
116
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/dicenet.py
""" DiCENet for ImageNet-1K, implemented in TensorFlow. Original paper: 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. """ __all__ = ['DiceNet', 'dicenet_wd5', 'dicenet_wd2', 'dicenet_w3d4', 'dicenet_w1', 'dicenet_w5d4', 'dicenet_w3d2', 'dicenet_w7d8', 'dicenet_w2'] import os import math import tensorflow as tf import tensorflow.keras.layers as nn from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, AvgPool2d, MaxPool2d, NormActivation,\ ChannelShuffle, Concurrent, PReLU2, SimpleSequential, is_channels_first, get_channel_axis, flatten class SpatialDiceBranch(nn.Layer): """ Spatial element of DiCE block for selected dimension. Parameters: ---------- sp_size : int Desired size for selected spatial dimension. is_height : bool Is selected dimension height. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, sp_size, is_height, data_format="channels_last", **kwargs): super(SpatialDiceBranch, self).__init__(**kwargs) self.is_height = is_height self.data_format = data_format if is_channels_first(self.data_format): self.index = 2 if is_height else 3 else: self.index = 1 if is_height else 2 self.base_sp_size = sp_size self.conv = conv3x3( in_channels=self.base_sp_size, out_channels=self.base_sp_size, groups=self.base_sp_size, data_format=data_format, name="conv") def call(self, x, training=None): x_shape = x.get_shape().as_list() height, width = x_shape[2:4] if is_channels_first(self.data_format) else x_shape[1:3] if self.is_height: real_sp_size = height real_in_size = (real_sp_size, width) base_in_size = (self.base_sp_size, width) else: real_sp_size = width real_in_size = (height, real_sp_size) base_in_size = (height, self.base_sp_size) if real_sp_size != self.base_sp_size: if is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 2, 3, 1]) x = tf.image.resize( images=x, size=base_in_size, method=self.method) if 
is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 3, 1, 2]) if self.is_height: if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 2, 1, 3)) else: x = tf.transpose(x, perm=(0, 3, 2, 1)) else: if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 3, 2, 1)) else: x = tf.transpose(x, perm=(0, 1, 3, 2)) x = self.conv(x) if self.is_height: if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 2, 1, 3)) else: x = tf.transpose(x, perm=(0, 3, 2, 1)) else: if is_channels_first(self.data_format): x = tf.transpose(x, perm=(0, 3, 2, 1)) else: x = tf.transpose(x, perm=(0, 1, 3, 2)) changed_sp_size = x.shape[self.index] if real_sp_size != changed_sp_size: if is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 2, 3, 1]) x = tf.image.resize( images=x, size=real_in_size, method=self.method) if is_channels_first(self.data_format): x = tf.transpose(x, perm=[0, 3, 1, 2]) return x class DiceBaseBlock(nn.Layer): """ Base part of DiCE block (without attention). Parameters: ---------- channels : int Number of input/output channels. in_size : tuple of two ints Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, in_size, data_format="channels_last", **kwargs): super(DiceBaseBlock, self).__init__(**kwargs) mid_channels = 3 * channels self.convs = Concurrent() self.convs.add(conv3x3( in_channels=channels, out_channels=channels, groups=channels, data_format=data_format, name="ch_conv")) self.convs.add(SpatialDiceBranch( sp_size=in_size[0], is_height=True, data_format=data_format, name="h_conv")) self.convs.add(SpatialDiceBranch( sp_size=in_size[1], is_height=False, data_format=data_format, name="w_conv")) self.norm_activ = NormActivation( in_channels=mid_channels, activation=(lambda: PReLU2(in_channels=mid_channels, name="activ")), data_format=data_format, name="norm_activ") self.shuffle = ChannelShuffle( channels=mid_channels, groups=3, data_format=data_format, name="shuffle") self.squeeze_conv = conv1x1_block( in_channels=mid_channels, out_channels=channels, groups=channels, activation=(lambda: PReLU2(in_channels=channels, name="activ")), data_format=data_format, name="squeeze_conv") def call(self, x, training=None): x = self.convs(x) x = self.norm_activ(x, training=training) x = self.shuffle(x) x = self.squeeze_conv(x, training=training) return x class DiceAttBlock(nn.Layer): """ Pure attention part of DiCE block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. reduction : int, default 4 Squeeze reduction value. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, reduction=4, data_format="channels_last", **kwargs): super(DiceAttBlock, self).__init__(**kwargs) self.data_format = data_format mid_channels = in_channels // reduction self.pool = nn.GlobalAveragePooling2D( data_format=data_format, name="pool") self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, use_bias=False, data_format=data_format, name="conv1") self.activ = nn.ReLU() self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, use_bias=False, data_format=data_format, name="conv2") self.sigmoid = tf.nn.sigmoid def call(self, x, training=None): w = self.pool(x) axis = -1 if is_channels_first(self.data_format) else 1 w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis) w = self.conv1(w) w = self.activ(w) w = self.conv2(w) w = self.sigmoid(w) return w class DiceBlock(nn.Layer): """ DiCE block (volume-wise separable convolutions). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs): super(DiceBlock, self).__init__(**kwargs) proj_groups = math.gcd(in_channels, out_channels) self.base_block = DiceBaseBlock( channels=in_channels, in_size=in_size, data_format=data_format, name="base_block") self.att = DiceAttBlock( in_channels=in_channels, out_channels=out_channels, data_format=data_format, name="att") self.proj_conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, groups=proj_groups, activation=(lambda: PReLU2(in_channels=out_channels, name="activ")), data_format=data_format, name="proj_conv") def call(self, x, training=None): x = self.base_block(x, training=training) w = self.att(x, training=training) x = self.proj_conv(x, training=training) x = x * w return x class StridedDiceLeftBranch(nn.Layer): """ Left branch of the strided DiCE block. Parameters: ---------- channels : int Number of input/output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, data_format="channels_last", **kwargs): super(StridedDiceLeftBranch, self).__init__(**kwargs) self.conv1 = conv3x3_block( in_channels=channels, out_channels=channels, strides=2, groups=channels, activation=(lambda: PReLU2(in_channels=channels, name="activ")), data_format=data_format, name="conv1") self.conv2 = conv1x1_block( in_channels=channels, out_channels=channels, activation=(lambda: PReLU2(in_channels=channels, name="activ")), data_format=data_format, name="conv2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) return x class StridedDiceRightBranch(nn.Layer): """ Right branch of the strided DiCE block. Parameters: ---------- channels : int Number of input/output channels. in_size : tuple of two ints Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, in_size, data_format="channels_last", **kwargs): super(StridedDiceRightBranch, self).__init__(**kwargs) self.pool = AvgPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") self.dice = DiceBlock( in_channels=channels, out_channels=channels, in_size=(in_size[0] // 2, in_size[1] // 2), data_format=data_format, name="dice") self.conv = conv1x1_block( in_channels=channels, out_channels=channels, activation=(lambda: PReLU2(in_channels=channels, name="activ")), data_format=data_format, name="conv") def call(self, x, training=None): x = self.pool(x) x = self.dice(x, training=training) x = self.conv(x, training=training) return x class StridedDiceBlock(nn.Layer): """ Strided DiCE block (strided volume-wise separable convolutions). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs): super(StridedDiceBlock, self).__init__(**kwargs) assert (out_channels == 2 * in_channels) self.branches = Concurrent() self.branches.add(StridedDiceLeftBranch( channels=in_channels, data_format=data_format, name="left_branch")) self.branches.add(StridedDiceRightBranch( channels=in_channels, in_size=in_size, data_format=data_format, name="right_branch")) self.shuffle = ChannelShuffle( channels=out_channels, groups=2, data_format=data_format, name="shuffle") def call(self, x, training=None): x = self.branches(x, training=training) x = self.shuffle(x) return x class ShuffledDiceRightBranch(nn.Layer): """ Right branch of the shuffled DiCE block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. 
data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs): super(ShuffledDiceRightBranch, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=(lambda: PReLU2(in_channels=out_channels, name="activ")), data_format=data_format, name="conv") self.dice = DiceBlock( in_channels=out_channels, out_channels=out_channels, in_size=in_size, data_format=data_format, name="dice") def call(self, x, training=None): x = self.conv(x, training=training) x = self.dice(x, training=training) return x class ShuffledDiceBlock(nn.Layer): """ Shuffled DiCE block (shuffled volume-wise separable convolutions). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of two ints Spatial size of the expected input image. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs): super(ShuffledDiceBlock, self).__init__(**kwargs) self.data_format = data_format self.left_part = in_channels - in_channels // 2 right_in_channels = in_channels - self.left_part right_out_channels = out_channels - self.left_part self.right_branch = ShuffledDiceRightBranch( in_channels=right_in_channels, out_channels=right_out_channels, in_size=in_size, data_format=data_format, name="right_branch") self.shuffle = ChannelShuffle( channels=(2 * right_out_channels), groups=2, data_format=data_format, name="shuffle") def call(self, x, training=None): axis = get_channel_axis(self.data_format) x1, x2 = tf.split(x, num_or_size_splits=2, axis=axis) x2 = self.right_branch(x2, training=training) x = tf.concat([x1, x2], axis=axis) x = self.shuffle(x) return x class DiceInitBlock(nn.Layer): """ DiceNet specific initial block. 
Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(DiceInitBlock, self).__init__(**kwargs) self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, strides=2, activation=(lambda: PReLU2(in_channels=out_channels, name="activ")), data_format=data_format, name="conv") self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x, training=training) x = self.pool(x) return x class DiceClassifier(nn.Layer): """ DiceNet specific classifier block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. classes : int, default 1000 Number of classification classes. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, mid_channels, classes, dropout_rate, data_format="channels_last", **kwargs): super(DiceClassifier, self).__init__(**kwargs) self.data_format = data_format self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=4, data_format=data_format, name="conv1") self.dropout = nn.Dropout( rate=dropout_rate, name="dropout") self.conv2 = conv1x1( in_channels=mid_channels, out_channels=classes, use_bias=True, data_format=data_format, name="conv2") def call(self, x, training=None): axis = -1 if is_channels_first(self.data_format) else 1 x = tf.expand_dims(tf.expand_dims(x, axis=axis), axis=axis) x = self.conv1(x) x = self.dropout(x, training=training) x = self.conv2(x) return x class DiceNet(tf.keras.Model): """ DiCENet model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. classifier_mid_channels : int Number of middle channels for classifier. dropout_rate : float Parameter of Dropout layer in classifier. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, init_block_channels, classifier_mid_channels, dropout_rate, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(DiceNet, self).__init__(**kwargs) assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0)) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(DiceInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels in_size = (in_size[0] // 4, in_size[1] // 4) for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): unit_class = StridedDiceBlock if j == 0 else ShuffledDiceBlock stage.add(unit_class( in_channels=in_channels, out_channels=out_channels, in_size=in_size, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels in_size = (in_size[0] // 2, in_size[1] // 2) if j == 0 else in_size self.features.add(stage) self.features.add(nn.GlobalAvgPool2D( data_format=data_format, name="final_pool")) self.output1 = DiceClassifier( in_channels=in_channels, mid_channels=classifier_mid_channels, classes=classes, dropout_rate=dropout_rate, data_format=data_format, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = self.output1(x, training=training) x = flatten(x, self.data_format) return x def get_dicenet(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create DiCENet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ channels_per_layers_dict = { 0.2: [32, 64, 128], 0.5: [48, 96, 192], 0.75: [86, 172, 344], 1.0: [116, 232, 464], 1.25: [144, 288, 576], 1.5: [176, 352, 704], 1.75: [210, 420, 840], 2.0: [244, 488, 976], 2.4: [278, 556, 1112], } if width_scale not in channels_per_layers_dict.keys(): raise ValueError("Unsupported DiceNet with width scale: {}".format(width_scale)) channels_per_layers = channels_per_layers_dict[width_scale] layers = [3, 7, 3] if width_scale > 0.2: init_block_channels = 24 else: init_block_channels = 16 channels = [[ci] * li for i, (ci, li) in enumerate(zip(channels_per_layers, layers))] for i in range(len(channels)): pred_channels = channels[i - 1][-1] if i != 0 else init_block_channels channels[i] = [pred_channels * 2] + channels[i] if width_scale > 2.0: classifier_mid_channels = 1280 else: classifier_mid_channels = 1024 if width_scale > 1.0: dropout_rate = 0.2 else: dropout_rate = 0.1 net = DiceNet( channels=channels, init_block_channels=init_block_channels, classifier_mid_channels=classifier_mid_channels, dropout_rate=dropout_rate, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def dicenet_wd5(**kwargs): """ DiCENet x0.2 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.2, model_name="dicenet_wd5", **kwargs) def dicenet_wd2(**kwargs): """ DiCENet x0.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.5, model_name="dicenet_wd2", **kwargs) def dicenet_w3d4(**kwargs): """ DiCENet x0.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=0.75, model_name="dicenet_w3d4", **kwargs) def dicenet_w1(**kwargs): """ DiCENet x1.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.0, model_name="dicenet_w1", **kwargs) def dicenet_w5d4(**kwargs): """ DiCENet x1.25 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_dicenet(width_scale=1.25, model_name="dicenet_w5d4", **kwargs) def dicenet_w3d2(**kwargs): """ DiCENet x1.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.5, model_name="dicenet_w3d2", **kwargs) def dicenet_w7d8(**kwargs): """ DiCENet x1.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_dicenet(width_scale=1.75, model_name="dicenet_w7d8", **kwargs) def dicenet_w2(**kwargs): """ DiCENet x2.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_dicenet(width_scale=2.0, model_name="dicenet_w2", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ dicenet_wd5, dicenet_wd2, dicenet_w3d4, dicenet_w1, dicenet_w5d4, dicenet_w3d2, dicenet_w7d8, dicenet_w2, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dicenet_wd5 or weight_count == 1130704) assert (model != dicenet_wd2 or weight_count == 1214120) assert (model != dicenet_w3d4 or weight_count == 1495676) assert (model != dicenet_w1 or weight_count == 1805604) assert (model != dicenet_w5d4 or weight_count == 2162888) assert (model != dicenet_w3d2 or weight_count == 2652200) assert (model != dicenet_w7d8 or weight_count == 3264932) assert (model != dicenet_w2 or weight_count == 3979044) if __name__ == "__main__": _test()
29,544
31.431394
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/nvpattexp.py
""" Neural Voice Puppetry Audio-to-Expression net for speech-driven facial animation, implemented in TensorFlow. Original paper: 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. """ __all__ = ['NvpAttExp', 'nvpattexp116bazel76'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import DenseBlock, ConvBlock, ConvBlock1d, SelectableDense, SimpleSequential, is_channels_first class NvpAttExpEncoder(nn.Layer): """ Neural Voice Puppetry Audio-to-Expression encoder. Parameters: ---------- audio_features : int Number of audio features (characters/sounds). audio_window_size : int Size of audio window (for time related audio features). seq_len : int, default Size of feature window. encoder_features : int Number of encoder features. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, audio_features, audio_window_size, seq_len, encoder_features, data_format="channels_last", **kwargs): super(NvpAttExpEncoder, self).__init__(**kwargs) self.audio_features = audio_features self.audio_window_size = audio_window_size self.seq_len = seq_len self.data_format = data_format conv_channels = (32, 32, 64, 64) conv_slopes = (0.02, 0.02, 0.2, 0.2) fc_channels = (128, 64, encoder_features) fc_slopes = (0.02, 0.02, None) att_conv_channels = (16, 8, 4, 2, 1) att_conv_slopes = 0.02 in_channels = audio_features self.conv_branch = SimpleSequential(name="conv_branch") for i, (out_channels, slope) in enumerate(zip(conv_channels, conv_slopes)): self.conv_branch.add(ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 1), strides=(2, 1), padding=(1, 0), use_bias=True, use_bn=False, activation=nn.LeakyReLU(alpha=slope), data_format=data_format, name="conv{}".format(i + 1))) in_channels = out_channels self.fc_branch = SimpleSequential(name="fc_branch") for i, (out_channels, slope) in enumerate(zip(fc_channels, fc_slopes)): activation = 
nn.LeakyReLU(alpha=slope) if slope is not None else "tanh" self.fc_branch.add(DenseBlock( in_channels=in_channels, out_channels=out_channels, use_bias=True, use_bn=False, activation=activation, data_format=data_format, name="fc{}".format(i + 1))) in_channels = out_channels self.att_conv_branch = SimpleSequential(name="att_conv_branch") for i, out_channels, in enumerate(att_conv_channels): self.att_conv_branch.add(ConvBlock1d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=1, padding=1, use_bias=True, use_bn=False, activation=nn.LeakyReLU(alpha=att_conv_slopes), data_format=data_format, name="att_conv{}".format(i + 1))) in_channels = out_channels self.att_fc = DenseBlock( in_channels=seq_len, out_channels=seq_len, use_bias=True, use_bn=False, activation=nn.Softmax(axis=1), data_format=data_format, name="att_fc") def call(self, x, training=None): batch = x.shape[0] batch_seq_len = batch * self.seq_len if is_channels_first(self.data_format): x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features)) x = tf.transpose(x, perm=(0, 3, 2, 1)) x = self.conv_branch(x) x = tf.squeeze(x, axis=-1) x = tf.reshape(x, shape=(batch_seq_len, 1, -1)) x = self.fc_branch(x) x = tf.reshape(x, shape=(batch, self.seq_len, -1)) x = tf.transpose(x, perm=(0, 2, 1)) y = x[:, :, (self.seq_len // 2)] w = self.att_conv_branch(x) w = tf.squeeze(w, axis=1) w = self.att_fc(w) w = tf.expand_dims(w, axis=-1) else: x = tf.transpose(x, perm=(0, 3, 1, 2)) x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features)) x = tf.transpose(x, perm=(0, 2, 3, 1)) x = tf.transpose(x, perm=(0, 1, 3, 2)) x = self.conv_branch(x) x = tf.squeeze(x, axis=1) x = self.fc_branch(x) x = tf.reshape(x, shape=(batch, self.seq_len, -1)) y = x[:, (self.seq_len // 2), :] w = self.att_conv_branch(x) w = tf.squeeze(w, axis=-1) w = self.att_fc(w) w = tf.expand_dims(w, axis=-1) x = tf.transpose(x, perm=(0, 2, 1)) x = tf.keras.backend.batch_dot(x, w) x = tf.squeeze(x, 
axis=-1) return x, y class NvpAttExp(tf.keras.Model): """ Neural Voice Puppetry Audio-to-Expression model from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. Parameters: ---------- audio_features : int, default 29 Number of audio features (characters/sounds). audio_window_size : int, default 16 Size of audio window (for time related audio features). seq_len : int, default 8 Size of feature window. base_persons : int, default 116 Number of base persons (identities). blendshapes : int, default 76 Number of 3D model blendshapes. encoder_features : int, default 32 Number of encoder features. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, audio_features=29, audio_window_size=16, seq_len=8, base_persons=116, blendshapes=76, encoder_features=32, data_format="channels_last", **kwargs): super(NvpAttExp, self).__init__(**kwargs) self.base_persons = base_persons self.data_format = data_format self.encoder = NvpAttExpEncoder( audio_features=audio_features, audio_window_size=audio_window_size, seq_len=seq_len, encoder_features=encoder_features, data_format=data_format, name="encoder") self.decoder = SelectableDense( in_channels=encoder_features, out_channels=blendshapes, use_bias=False, num_options=base_persons, name="decoder") def call(self, x, pid, training=None): x, y = self.encoder(x, training=training) x = self.decoder(x, pid) y = self.decoder(y, pid) return x, y def get_nvpattexp(base_persons, blendshapes, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create Neural Voice Puppetry Audio-to-Expression model with specific parameters. Parameters: ---------- base_persons : int Number of base persons (subjects). blendshapes : int Number of 3D model blendshapes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ net = NvpAttExp( base_persons=base_persons, blendshapes=blendshapes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def nvpattexp116bazel76(**kwargs): """ Neural Voice Puppetry Audio-to-Expression model for 116 base persons and Bazel topology with 76 blendshapes from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_nvpattexp(base_persons=116, blendshapes=76, model_name="nvpattexp116bazel76", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K # data_format = "channels_first" data_format = "channels_last" pretrained = False models = [ nvpattexp116bazel76, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 seq_len = 8 audio_window_size = 16 audio_features = 29 blendshapes = 76 x = tf.random.normal((batch, seq_len, audio_window_size, audio_features) if is_channels_first(data_format) else (batch, audio_window_size, audio_features, seq_len)) pid = tf.fill(dims=(batch,), value=3) y1, y2 = net(x, pid) assert (y1.shape == y2.shape == (batch, blendshapes)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != nvpattexp116bazel76 or weight_count == 327397) if __name__ == "__main__": _test()
10,488
34.435811
119
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/alexnet.py
""" AlexNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. """ __all__ = ['AlexNet', 'alexnet', 'alexnetb'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import ConvBlock, MaxPool2d, SimpleSequential, flatten, is_channels_first class AlexConv(ConvBlock): """ AlexNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. use_lrn : bool Whether to use LRN layer. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, use_lrn, data_format="channels_last", **kwargs): super(AlexConv, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, use_bias=True, use_bn=False, data_format=data_format, **kwargs) self.use_lrn = use_lrn def call(self, x, training=None): x = super(AlexConv, self).call(x, training=training) if self.use_lrn: x = tf.nn.lrn(x, bias=2, alpha=1e-4, beta=0.75) return x class AlexDense(nn.Layer): """ AlexNet specific dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels, **kwargs): super(AlexDense, self).__init__(**kwargs) self.fc = nn.Dense( units=out_channels, input_dim=in_channels, name="fc") self.activ = nn.ReLU() self.dropout = nn.Dropout( rate=0.5, name="dropout") def call(self, x, training=None): x = self.fc(x) x = self.activ(x) x = self.dropout(x, training=training) return x class AlexOutputBlock(nn.Layer): """ AlexNet specific output block. Parameters: ---------- in_channels : int Number of input channels. classes : int Number of classification classes. """ def __init__(self, in_channels, classes, **kwargs): super(AlexOutputBlock, self).__init__(**kwargs) mid_channels = 4096 self.fc1 = AlexDense( in_channels=in_channels, out_channels=mid_channels, name="fc1") self.fc2 = AlexDense( in_channels=mid_channels, out_channels=mid_channels, name="fc2") self.fc3 = nn.Dense( units=classes, input_dim=mid_channels, name="fc3") def call(self, x, training=None): x = self.fc1(x, training=training) x = self.fc2(x, training=training) x = self.fc3(x) return x class AlexNet(tf.keras.Model): """ AlexNet model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Parameters: ---------- channels : list of list of int Number of output channels for each unit. kernel_sizes : list of list of int Convolution window sizes for each unit. strides : list of list of int or tuple/list of 2 int Strides of the convolution for each unit. paddings : list of list of int or tuple/list of 2 int Padding value for convolution layer for each unit. use_lrn : bool Whether to use LRN layer. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, channels, kernel_sizes, strides, paddings, use_lrn, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(AlexNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") for i, channels_per_stage in enumerate(channels): use_lrn_i = use_lrn and (i in [0, 1]) stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): stage.add(AlexConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_sizes[i][j], strides=strides[i][j], padding=paddings[i][j], use_lrn=use_lrn_i, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels stage.add(MaxPool2d( pool_size=3, strides=2, padding=0, ceil_mode=True, data_format=data_format, name="pool{}".format(i + 1))) self.features.add(stage) in_channels = in_channels * 6 * 6 self.output1 = AlexOutputBlock( in_channels=in_channels, classes=classes, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x, training=training) return x def get_alexnet(version="a", model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create AlexNet model with specific parameters. Parameters: ---------- version : str, default 'a' Version of AlexNet ('a' or 'b'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" if version == "a": channels = [[96], [256], [384, 384, 256]] kernel_sizes = [[11], [5], [3, 3, 3]] strides = [[4], [1], [1, 1, 1]] paddings = [[0], [2], [1, 1, 1]] use_lrn = True elif version == "b": channels = [[64], [192], [384, 256, 256]] kernel_sizes = [[11], [5], [3, 3, 3]] strides = [[4], [1], [1, 1, 1]] paddings = [[2], [2], [1, 1, 1]] use_lrn = False else: raise ValueError("Unsupported AlexNet version {}".format(version)) net = AlexNet( channels=channels, kernel_sizes=kernel_sizes, strides=strides, paddings=paddings, use_lrn=use_lrn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def alexnet(**kwargs): """ AlexNet model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_alexnet(model_name="alexnet", **kwargs) def alexnetb(**kwargs): """ AlexNet-b model from 'One weird trick for parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997. Non-standard version. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_alexnet(version="b", model_name="alexnetb", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ alexnet, alexnetb, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != alexnet or weight_count == 62378344) assert (model != alexnetb or weight_count == 61100840) if __name__ == "__main__": _test()
10,247
29.960725
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/mobilenet_cub.py
""" MobileNet & FD-MobileNet for CUB-200-2011, implemented in TensorFlow. Original papers: - 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. - 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. """ __all__ = ['mobilenet_w1_cub', 'mobilenet_w3d4_cub', 'mobilenet_wd2_cub', 'mobilenet_wd4_cub', 'fdmobilenet_w1_cub', 'fdmobilenet_w3d4_cub', 'fdmobilenet_wd2_cub', 'fdmobilenet_wd4_cub'] from .common import is_channels_first from .mobilenet import get_mobilenet from .fdmobilenet import get_fdmobilenet def mobilenet_w1_cub(classes=200, **kwargs): """ 1.0 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(classes=classes, width_scale=1.0, model_name="mobilenet_w1_cub", **kwargs) def mobilenet_w3d4_cub(classes=200, **kwargs): """ 0.75 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(classes=classes, width_scale=0.75, model_name="mobilenet_w3d4_cub", **kwargs) def mobilenet_wd2_cub(classes=200, **kwargs): """ 0.5 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. 
Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(classes=classes, width_scale=0.5, model_name="mobilenet_wd2_cub", **kwargs) def mobilenet_wd4_cub(classes=200, **kwargs): """ 0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_mobilenet(classes=classes, width_scale=0.25, model_name="mobilenet_wd4_cub", **kwargs) def fdmobilenet_w1_cub(classes=200, **kwargs): """ FD-MobileNet 1.0x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fdmobilenet(classes=classes, width_scale=1.0, model_name="fdmobilenet_w1_cub", **kwargs) def fdmobilenet_w3d4_cub(classes=200, **kwargs): """ FD-MobileNet 0.75x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_fdmobilenet(classes=classes, width_scale=0.75, model_name="fdmobilenet_w3d4_cub", **kwargs) def fdmobilenet_wd2_cub(classes=200, **kwargs): """ FD-MobileNet 0.5x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fdmobilenet(classes=classes, width_scale=0.5, model_name="fdmobilenet_wd2_cub", **kwargs) def fdmobilenet_wd4_cub(classes=200, **kwargs): """ FD-MobileNet 0.25x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_fdmobilenet(classes=classes, width_scale=0.25, model_name="fdmobilenet_wd4_cub", **kwargs) def _test(): import numpy as np import tensorflow as tf import tensorflow.keras.backend as K data_format = "channels_last" # data_format = "channels_first" pretrained = False models = [ mobilenet_w1_cub, mobilenet_w3d4_cub, mobilenet_wd2_cub, mobilenet_wd4_cub, fdmobilenet_w1_cub, fdmobilenet_w3d4_cub, fdmobilenet_wd2_cub, fdmobilenet_wd4_cub, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 200)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenet_w1_cub or weight_count == 3411976) assert (model != mobilenet_w3d4_cub or weight_count == 1970360) assert (model != mobilenet_wd2_cub or weight_count == 921192) assert (model != mobilenet_wd4_cub or weight_count == 264472) assert (model != fdmobilenet_w1_cub or weight_count == 2081288) assert (model != fdmobilenet_w3d4_cub or weight_count == 1218104) assert (model != fdmobilenet_wd2_cub or weight_count == 583528) assert (model != fdmobilenet_wd4_cub or weight_count == 177560) if __name__ == "__main__": _test()
7,245
35.969388
120
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/wrn.py
""" WRN for ImageNet-1K, implemented in TensorFlow. Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. """ __all__ = ['WRN', 'wrn50_2'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import Conv2d, MaxPool2d, SimpleSequential, flatten, is_channels_first class WRNConv(nn.Layer): """ WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. strides : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. activate : bool Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, kernel_size, strides, padding, activate, data_format="channels_last", **kwargs): super(WRNConv, self).__init__(**kwargs) self.activate = activate self.conv = Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, use_bias=True, data_format=data_format, name="conv") if self.activate: self.activ = nn.ReLU() def call(self, x, training=None): x = self.conv(x) if self.activate: x = self.activ(x) return x def wrn_conv1x1(in_channels, out_channels, strides, activate, data_format="channels_last", **kwargs): """ 1x1 version of the WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. activate : bool Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" return WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, strides=strides, padding=0, activate=activate, data_format=data_format, **kwargs) def wrn_conv3x3(in_channels, out_channels, strides, activate, data_format="channels_last", **kwargs): """ 3x3 version of the WRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. activate : bool Whether activate the convolution block. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ return WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, strides=strides, padding=1, activate=activate, data_format=data_format, **kwargs) class WRNBottleneck(nn.Layer): """ WRN bottleneck block for residual path in WRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. width_factor : float Wide scale factor for width of layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, width_factor, data_format="channels_last", **kwargs): super(WRNBottleneck, self).__init__(**kwargs) mid_channels = int(round(out_channels // 4 * width_factor)) self.conv1 = wrn_conv1x1( in_channels=in_channels, out_channels=mid_channels, strides=1, activate=True, data_format=data_format, name="conv1") self.conv2 = wrn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, strides=strides, activate=True, data_format=data_format, name="conv2") self.conv3 = wrn_conv1x1( in_channels=mid_channels, out_channels=out_channels, strides=1, activate=False, data_format=data_format, name="conv3") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) return x class WRNUnit(nn.Layer): """ WRN unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. strides : int or tuple/list of 2 int Strides of the convolution. width_factor : float Wide scale factor for width of layers. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, strides, width_factor, data_format="channels_last", **kwargs): super(WRNUnit, self).__init__(**kwargs) self.resize_identity = (in_channels != out_channels) or (strides != 1) self.body = WRNBottleneck( in_channels=in_channels, out_channels=out_channels, strides=strides, width_factor=width_factor, data_format=data_format, name="body") if self.resize_identity: self.identity_conv = wrn_conv1x1( in_channels=in_channels, out_channels=out_channels, strides=strides, activate=False, data_format=data_format, name="identity_conv") self.activ = nn.ReLU() def call(self, x, training=None): if self.resize_identity: identity = self.identity_conv(x, training=training) else: identity = x x = self.body(x, training=training) x = x + identity x = self.activ(x) return x class WRNInitBlock(nn.Layer): """ WRN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs): super(WRNInitBlock, self).__init__(**kwargs) self.conv = WRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=7, strides=2, padding=3, activate=True, data_format=data_format, name="conv") self.pool = MaxPool2d( pool_size=3, strides=2, padding=1, data_format=data_format, name="pool") def call(self, x, training=None): x = self.conv(x, training=training) x = self.pool(x) return x class WRN(tf.keras.Model): """ WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. width_factor : float Wide scale factor for width of layers. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, width_factor, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs): super(WRN, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format self.features = SimpleSequential(name="features") self.features.add(WRNInitBlock( in_channels=in_channels, out_channels=init_block_channels, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(WRNUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, width_factor=width_factor, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=7, strides=1, data_format=data_format, name="final_pool")) self.output1 = nn.Dense( units=classes, input_dim=in_channels, name="output1") def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def get_wrn(blocks, width_factor, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create WRN model with specific parameters. Parameters: ---------- blocks : int Number of blocks. width_factor : float Wide scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = WRN( channels=channels, init_block_channels=init_block_channels, width_factor=width_factor, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def wrn50_2(**kwargs): """ WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_wrn(blocks=50, width_factor=2.0, model_name="wrn50_2", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ wrn50_2, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn50_2 or weight_count == 68849128) if __name__ == "__main__": _test()
13,742
28.941176
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/inceptionv3.py
""" InceptionV3 for ImageNet-1K, implemented in TensorFlow. Original paper: 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. """ __all__ = ['InceptionV3', 'inceptionv3', 'MaxPoolBranch', 'AvgPoolBranch', 'Conv1x1Branch', 'ConvSeqBranch'] import os import tensorflow as tf import tensorflow.keras.layers as nn from .common import MaxPool2d, AvgPool2d, ConvBlock, conv1x1_block, conv3x3_block, SimpleSequential, Concurrent,\ flatten, is_channels_first, get_channel_axis class MaxPoolBranch(nn.Layer): """ Inception specific max pooling branch block. Parameters: ---------- data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, data_format="channels_last", **kwargs): super(MaxPoolBranch, self).__init__(**kwargs) self.pool = MaxPool2d( pool_size=3, strides=2, padding=0, data_format=data_format, name="pool") def call(self, x, training=None): x = self.pool(x) return x class AvgPoolBranch(nn.Layer): """ Inception specific average pooling branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. count_include_pad : bool, default True Whether to include the zero-padding in the averaging calculation. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, count_include_pad=True, data_format="channels_last", **kwargs): super(AvgPoolBranch, self).__init__(**kwargs) assert (count_include_pad or not count_include_pad) self.pool = AvgPool2d( pool_size=3, strides=1, padding=1, data_format=data_format, name="pool") self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, data_format=data_format, name="conv") def call(self, x, training=None): x = self.pool(x) x = self.conv(x, training=training) return x class Conv1x1Branch(nn.Layer): """ Inception specific convolutional 1x1 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(Conv1x1Branch, self).__init__(**kwargs) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, data_format=data_format, name="conv") def call(self, x, training=None): x = self.conv(x, training=training) return x class ConvSeqBranch(nn.Layer): """ Inception specific convolutional sequence branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, bn_eps, data_format="channels_last", **kwargs): super(ConvSeqBranch, self).__init__(**kwargs) assert (len(out_channels_list) == len(kernel_size_list)) assert (len(out_channels_list) == len(strides_list)) assert (len(out_channels_list) == len(padding_list)) self.conv_list = SimpleSequential(name="conv_list") for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.children.append(ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, bn_eps=bn_eps, data_format=data_format, name="conv{}".format(i + 1))) in_channels = out_channels def call(self, x, training=None): x = self.conv_list(x, training=training) return x class ConvSeq3x3Branch(nn.Layer): """ InceptionV3 specific convolutional sequence branch block with splitting by 3x3. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of tuple of int List of numbers of output channels. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, bn_eps, data_format="channels_last", **kwargs): super(ConvSeq3x3Branch, self).__init__(**kwargs) self.data_format = data_format self.conv_list = SimpleSequential(name="conv_list") for i, (out_channels, kernel_size, strides, padding) in enumerate(zip( out_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.children.append(ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, strides=strides, padding=padding, bn_eps=bn_eps, data_format=data_format, name="conv{}".format(i + 1))) in_channels = out_channels self.conv1x3 = ConvBlock( in_channels=in_channels, out_channels=in_channels, kernel_size=(1, 3), strides=1, padding=(0, 1), bn_eps=bn_eps, data_format=data_format, name="conv1x3") self.conv3x1 = ConvBlock( in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 1), strides=1, padding=(1, 0), bn_eps=bn_eps, data_format=data_format, name="conv3x1") def call(self, x, training=None): x = self.conv_list(x, training=training) y1 = self.conv1x3(x, training=training) y2 = self.conv3x1(x, training=training) x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format)) return x class InceptionAUnit(nn.Layer): """ InceptionV3 type Inception-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(InceptionAUnit, self).__init__(**kwargs) assert (out_channels > 224) pool_out_channels = out_channels - 224 self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(Conv1x1Branch( in_channels=in_channels, out_channels=64, bn_eps=bn_eps, data_format=data_format, name="branch1")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(48, 64), kernel_size_list=(1, 5), strides_list=(1, 1), padding_list=(0, 2), bn_eps=bn_eps, data_format=data_format, name="branch2")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps, data_format=data_format, name="branch3")) self.branches.children.append(AvgPoolBranch( in_channels=in_channels, out_channels=pool_out_channels, bn_eps=bn_eps, data_format=data_format, name="branch4")) def call(self, x, training=None): x = self.branches(x, training=training) return x class ReductionAUnit(nn.Layer): """ InceptionV3 type Reduction-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(ReductionAUnit, self).__init__(**kwargs) assert (in_channels == 288) assert (out_channels == 768) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,), bn_eps=bn_eps, data_format=data_format, name="branch1")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), bn_eps=bn_eps, data_format=data_format, name="branch2")) self.branches.children.append(MaxPoolBranch( data_format=data_format, name="branch3")) def call(self, x, training=None): x = self.branches(x, training=training) return x class InceptionBUnit(nn.Layer): """ InceptionV3 type Inception-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of output channels in the 7x7 branches. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, mid_channels, bn_eps, data_format="channels_last", **kwargs): super(InceptionBUnit, self).__init__(**kwargs) assert (in_channels == 768) assert (out_channels == 768) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(Conv1x1Branch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps, data_format=data_format, name="branch1")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(mid_channels, mid_channels, 192), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), bn_eps=bn_eps, data_format=data_format, name="branch2")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(mid_channels, mid_channels, mid_channels, mid_channels, 192), kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)), strides_list=(1, 1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)), bn_eps=bn_eps, data_format=data_format, name="branch3")) self.branches.children.append(AvgPoolBranch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps, data_format=data_format, name="branch4")) def call(self, x, training=None): x = self.branches(x, training=training) return x class ReductionBUnit(nn.Layer): """ InceptionV3 type Reduction-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(ReductionBUnit, self).__init__(**kwargs) assert (in_channels == 768) assert (out_channels == 1280) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 320), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), bn_eps=bn_eps, data_format=data_format, name="branch1")) self.branches.children.append(ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 192, 192, 192), kernel_size_list=(1, (1, 7), (7, 1), 3), strides_list=(1, 1, 1, 2), padding_list=(0, (0, 3), (3, 0), 0), bn_eps=bn_eps, data_format=data_format, name="branch2")) self.branches.children.append(MaxPoolBranch( data_format=data_format, name="branch3")) def call(self, x, training=None): x = self.branches(x, training=training) return x class InceptionCUnit(nn.Layer): """ InceptionV3 type Inception-C unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(InceptionCUnit, self).__init__(**kwargs) assert (out_channels == 2048) self.branches = Concurrent( data_format=data_format, name="branches") self.branches.children.append(Conv1x1Branch( in_channels=in_channels, out_channels=320, bn_eps=bn_eps, data_format=data_format, name="branch1")) self.branches.children.append(ConvSeq3x3Branch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(1,), strides_list=(1,), padding_list=(0,), bn_eps=bn_eps, data_format=data_format, name="branch2")) self.branches.children.append(ConvSeq3x3Branch( in_channels=in_channels, out_channels_list=(448, 384), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1), bn_eps=bn_eps, data_format=data_format, name="branch3")) self.branches.children.append(AvgPoolBranch( in_channels=in_channels, out_channels=192, bn_eps=bn_eps, data_format=data_format, name="branch4")) def call(self, x, training=None): x = self.branches(x, training=training) return x class InceptInitBlock(nn.Layer): """ InceptionV3 specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. 
""" def __init__(self, in_channels, out_channels, bn_eps, data_format="channels_last", **kwargs): super(InceptInitBlock, self).__init__(**kwargs) assert (out_channels == 192) self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, strides=2, padding=0, bn_eps=bn_eps, data_format=data_format, name="conv1") self.conv2 = conv3x3_block( in_channels=32, out_channels=32, strides=1, padding=0, bn_eps=bn_eps, data_format=data_format, name="conv2") self.conv3 = conv3x3_block( in_channels=32, out_channels=64, strides=1, padding=1, bn_eps=bn_eps, data_format=data_format, name="conv3") self.pool1 = MaxPool2d( pool_size=3, strides=2, padding=0, data_format=data_format, name="pool1") self.conv4 = conv1x1_block( in_channels=64, out_channels=80, strides=1, padding=0, bn_eps=bn_eps, data_format=data_format, name="conv4") self.conv5 = conv3x3_block( in_channels=80, out_channels=192, strides=1, padding=0, bn_eps=bn_eps, data_format=data_format, name="conv5") self.pool2 = MaxPool2d( pool_size=3, strides=2, padding=0, data_format=data_format, name="pool2") def call(self, x, training=None): x = self.conv1(x, training=training) x = self.conv2(x, training=training) x = self.conv3(x, training=training) x = self.pool1(x) x = self.conv4(x, training=training) x = self.conv5(x, training=training) x = self.pool2(x) return x class InceptionV3(tf.keras.Model): """ InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. b_mid_channels : list of int Number of middle channels for each Inception-B unit. dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. classes : int, default 1000 Number of classification classes. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. """ def __init__(self, channels, init_block_channels, b_mid_channels, dropout_rate=0.5, bn_eps=1e-5, in_channels=3, in_size=(299, 299), classes=1000, data_format="channels_last", **kwargs): super(InceptionV3, self).__init__(**kwargs) self.in_size = in_size self.classes = classes self.data_format = data_format normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = SimpleSequential(name="features") self.features.add(InceptInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, data_format=data_format, name="init_block")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SimpleSequential(name="stage{}".format(i + 1)) for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] else: unit = normal_units[i] if unit == InceptionBUnit: stage.add(unit( in_channels=in_channels, out_channels=out_channels, mid_channels=b_mid_channels[j - 1], bn_eps=bn_eps, data_format=data_format, name="unit{}".format(j + 1))) else: stage.add(unit( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, data_format=data_format, name="unit{}".format(j + 1))) in_channels = out_channels self.features.add(stage) self.features.add(nn.AveragePooling2D( pool_size=8, strides=1, data_format=data_format, name="final_pool")) self.output1 = SimpleSequential(name="output1") self.output1.add(nn.Dropout( rate=dropout_rate, name="dropout")) self.output1.add(nn.Dense( units=classes, input_dim=in_channels, name="fc")) def call(self, x, training=None): x = self.features(x, training=training) x = flatten(x, self.data_format) x = self.output1(x) return x def 
get_inceptionv3(model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create InceptionV3 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ init_block_channels = 192 channels = [[256, 288, 288], [768, 768, 768, 768, 768], [1280, 2048, 2048]] b_mid_channels = [128, 160, 160, 192] net = InceptionV3( channels=channels, init_block_channels=init_block_channels, b_mid_channels=b_mid_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def inceptionv3(**kwargs): """ InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,' https://arxiv.org/abs/1512.00567. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_inceptionv3(model_name="inceptionv3", bn_eps=1e-3, **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K data_format = "channels_last" pretrained = False models = [ inceptionv3, ] for model in models: net = model(pretrained=pretrained, data_format=data_format) batch = 14 x = tf.random.normal((batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != inceptionv3 or weight_count == 23834568) if __name__ == "__main__": _test()
26,989
31.715152
115
py
imgclsmob
imgclsmob-master/tensorflow2/tf2cv/models/fdmobilenet.py
""" FD-MobileNet for ImageNet-1K, implemented in TensorFlow. Original paper: 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. """ __all__ = ['fdmobilenet_w1', 'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4', 'get_fdmobilenet'] import os import tensorflow as tf from .mobilenet import MobileNet def get_fdmobilenet(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".tensorflow", "models"), **kwargs): """ Create FD-MobileNet model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]] first_stage_stride = True if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] net = MobileNet( channels=channels, first_stage_stride=first_stage_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3 input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\ (1,) + net.in_size + (in_channels,) net.build(input_shape=input_shape) net.load_weights( filepath=get_model_file( model_name=model_name, local_model_store_dir_path=root)) return net def fdmobilenet_w1(**kwargs): """ FD-MobileNet 1.0x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=1.0, model_name="fdmobilenet_w1", **kwargs) def fdmobilenet_w3d4(**kwargs): """ FD-MobileNet 0.75x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=0.75, model_name="fdmobilenet_w3d4", **kwargs) def fdmobilenet_wd2(**kwargs): """ FD-MobileNet 0.5x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. """ return get_fdmobilenet(width_scale=0.5, model_name="fdmobilenet_wd2", **kwargs) def fdmobilenet_wd4(**kwargs): """ FD-MobileNet 0.25x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters. 
""" return get_fdmobilenet(width_scale=0.25, model_name="fdmobilenet_wd4", **kwargs) def _test(): import numpy as np import tensorflow.keras.backend as K pretrained = False models = [ fdmobilenet_w1, fdmobilenet_w3d4, fdmobilenet_wd2, fdmobilenet_wd4, ] for model in models: net = model(pretrained=pretrained) batch = 14 x = tf.random.normal((batch, 224, 224, 3)) y = net(x) assert (tuple(y.shape.as_list()) == (batch, 1000)) weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights]) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fdmobilenet_w1 or weight_count == 2901288) assert (model != fdmobilenet_w3d4 or weight_count == 1833304) assert (model != fdmobilenet_wd2 or weight_count == 993928) assert (model != fdmobilenet_wd4 or weight_count == 383160) if __name__ == "__main__": _test()
4,966
31.677632
115
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/seg_metrics_np.py
""" Routines for segmentation metrics on numpy. """ import numpy as np __all__ = ['seg_pixel_accuracy_np', 'segm_mean_accuracy_hmasks', 'segm_mean_accuracy', 'seg_mean_iou_np', 'segm_mean_iou2', 'seg_mean_iou_imasks_np', 'segm_fw_iou_hmasks', 'segm_fw_iou'] def seg_pixel_accuracy_np(label_imask, pred_imask, vague_idx=-1, use_vague=False, macro_average=True, empty_result=0.0): """ The segmentation pixel accuracy. Parameters: ---------- label_imask : np.array Ground truth index mask (maybe batch of). pred_imask : np.array Predicted index mask (maybe batch of). vague_idx : int, default -1 Index of masked pixels. use_vague : bool, default False Whether to use pixel masking. macro_average : bool, default True Whether to use micro or macro averaging. empty_result : float, default 0.0 Result value for an image without any classes. Returns: ------- float or tuple of two ints PA metric value. """ assert (label_imask.shape == pred_imask.shape) if use_vague: sum_u_ij = np.sum(label_imask.flat != vague_idx) if sum_u_ij == 0: if macro_average: return empty_result else: return 0, 0 sum_u_ii = np.sum(np.logical_and(pred_imask.flat == label_imask.flat, label_imask.flat != vague_idx)) else: sum_u_ii = np.sum(pred_imask.flat == label_imask.flat) sum_u_ij = pred_imask.size if macro_average: return float(sum_u_ii) / sum_u_ij else: return sum_u_ii, sum_u_ij def segm_mean_accuracy_hmasks(label_hmask, pred_hmask): """ The segmentation mean accuracy. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_hmask : np.array Predicted one-hot mask. Returns: ------- float MA metric value. 
""" assert (pred_hmask.shape == label_hmask.shape) assert (len(pred_hmask.shape) == 3) n = label_hmask.shape[0] i_sum = 0 acc_sum = 0.0 for i in range(n): class_i_pred_mask = pred_hmask[i, :, :] class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) if u_i == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) class_acc = float(u_ii) / u_i acc_sum += class_acc i_sum += 1 if i_sum > 0: mean_acc = acc_sum / i_sum else: mean_acc = 1.0 return mean_acc def segm_mean_accuracy(label_hmask, pred_imask): """ The segmentation mean accuracy. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_imask : np.array Predicted index mask. Returns: ------- float MA metric value. """ assert (len(label_hmask.shape) == 3) assert (len(pred_imask.shape) == 2) assert (pred_imask.shape == label_hmask.shape[1:]) n = label_hmask.shape[0] i_sum = 0 acc_sum = 0.0 for i in range(n): class_i_pred_mask = (pred_imask == i) class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) if u_i == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) class_acc = float(u_ii) / u_i acc_sum += class_acc i_sum += 1 if i_sum > 0: mean_acc = acc_sum / i_sum else: mean_acc = 1.0 return mean_acc def segm_mean_iou_imasks(label_hmask, pred_hmask): """ The segmentation mean accuracy. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_hmask : np.array Predicted one-hot mask. Returns: ------- float MA metric value. 
""" assert (pred_hmask.shape == label_hmask.shape) assert (len(pred_hmask.shape) == 3) n = label_hmask.shape[0] i_sum = 0 acc_sum = 0.0 for i in range(n): class_i_pred_mask = pred_hmask[i, :, :] class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) if u_i == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) class_acc = float(u_ii) / u_i acc_sum += class_acc i_sum += 1 if i_sum > 0: mean_acc = acc_sum / i_sum else: mean_acc = 1.0 return mean_acc def seg_mean_iou_np(label_hmask, pred_imask): """ The segmentation mean intersection over union. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_imask : np.array Predicted index mask. Returns: ------- float MIoU metric value. """ assert (len(label_hmask.shape) == 3) assert (len(pred_imask.shape) == 2) assert (pred_imask.shape == label_hmask.shape[1:]) n = label_hmask.shape[0] i_sum = 0 acc_iou = 0.0 for i in range(n): class_i_pred_mask = (pred_imask == i) class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) u_ji_sj = np.sum(class_i_pred_mask) if (u_i + u_ji_sj) == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii) i_sum += 1 if i_sum > 0: mean_iou = acc_iou / i_sum else: mean_iou = 1.0 return mean_iou def segm_mean_iou2(label_hmask, pred_hmask): """ The segmentation mean intersection over union. Parameters: ---------- label_hmask : nd.array Ground truth one-hot mask (batch of). pred_hmask : nd.array Predicted one-hot mask (batch of). Returns: ------- float MIoU metric value. 
""" assert (len(label_hmask.shape) == 4) assert (len(pred_hmask.shape) == 4) assert (pred_hmask.shape == label_hmask.shape) eps = np.finfo(np.float32).eps class_axis = 1 # The axis that represents classes inter_hmask = label_hmask * pred_hmask u_i = label_hmask.sum(axis=[2, 3]) u_ji_sj = pred_hmask.sum(axis=[2, 3]) u_ii = inter_hmask.sum(axis=[2, 3]) class_count = (u_i + u_ji_sj > 0.0).sum(axis=class_axis) + eps class_acc = u_ii / (u_i + u_ji_sj - u_ii + eps) acc_iou = class_acc.sum(axis=class_axis) + eps mean_iou = (acc_iou / class_count).mean().asscalar() return mean_iou def seg_mean_iou_imasks_np(label_imask, pred_imask, num_classes, vague_idx=-1, use_vague=False, bg_idx=-1, ignore_bg=False, macro_average=True, empty_result=0.0): """ The segmentation mean intersection over union. Parameters: ---------- label_imask : nd.array Ground truth index mask (batch of). pred_imask : nd.array Predicted index mask (batch of). num_classes : int Number of classes. vague_idx : int, default -1 Index of masked pixels. use_vague : bool, default False Whether to use pixel masking. bg_idx : int, default -1 Index of background class. ignore_bg : bool, default False Whether to ignore background class. macro_average : bool, default True Whether to use micro or macro averaging. empty_result : float, default 0.0 Result value for an image without any classes. Returns: ------- float or tuple of two np.arrays of int MIoU metric value. 
""" assert (len(label_imask.shape) == 2) assert (len(pred_imask.shape) == 2) assert (pred_imask.shape == label_imask.shape) min_i = 1 max_i = num_classes n_bins = num_classes if ignore_bg: n_bins -= 1 if bg_idx != 0: assert (bg_idx == num_classes - 1) max_i -= 1 if not (ignore_bg and (bg_idx == 0)): label_imask += 1 pred_imask += 1 vague_idx += 1 if use_vague: label_imask = label_imask * (label_imask != vague_idx) pred_imask = pred_imask * (pred_imask != vague_idx) intersection = pred_imask * (pred_imask == label_imask) area_inter, _ = np.histogram(intersection, bins=n_bins, range=(min_i, max_i)) area_pred, _ = np.histogram(pred_imask, bins=n_bins, range=(min_i, max_i)) area_label, _ = np.histogram(label_imask, bins=n_bins, range=(min_i, max_i)) area_union = area_pred + area_label - area_inter assert ((not ignore_bg) or (len(area_inter) == num_classes - 1)) assert (ignore_bg or (len(area_inter) == num_classes)) if macro_average: class_count = (area_union > 0).sum() if class_count == 0: return empty_result eps = np.finfo(np.float32).eps area_union = area_union + eps mean_iou = (area_inter / area_union).sum() / class_count return mean_iou else: return area_inter.astype(np.uint64), area_union.astype(np.uint64) def segm_fw_iou_hmasks(label_hmask, pred_hmask): """ The segmentation frequency weighted intersection over union. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_hmask : np.array Predicted one-hot mask. Returns: ------- float FrIoU metric value. 
""" assert (pred_hmask.shape == label_hmask.shape) assert (len(pred_hmask.shape) == 3) n = label_hmask.shape[0] acc_iou = 0.0 for i in range(n): class_i_pred_mask = pred_hmask[i, :, :] class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) u_ji_sj = np.sum(class_i_pred_mask) if (u_i + u_ji_sj) == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii) fw_factor = pred_hmask[0].size return acc_iou / fw_factor def segm_fw_iou(label_hmask, pred_imask): """ The segmentation frequency weighted intersection over union. Parameters: ---------- label_hmask : np.array Ground truth one-hot mask. pred_imask : np.array Predicted index mask. Returns: ------- float FrIoU metric value. """ assert (len(label_hmask.shape) == 3) assert (len(pred_imask.shape) == 2) assert (pred_imask.shape == label_hmask.shape[1:]) n = label_hmask.shape[0] acc_iou = 0.0 for i in range(n): class_i_pred_mask = (pred_imask == i) class_i_label_mask = label_hmask[i, :, :] u_i = np.sum(class_i_label_mask) u_ji_sj = np.sum(class_i_pred_mask) if (u_i + u_ji_sj) == 0: continue u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask)) acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii) fw_factor = pred_imask.size return acc_iou / fw_factor
11,447
25.5
109
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/seg_metrics.py
""" Evaluation Metrics for Semantic Segmentation. """ import numpy as np from .metric import EvalMetric from .seg_metrics_np import seg_pixel_accuracy_np, seg_mean_iou_imasks_np __all__ = ['PixelAccuracyMetric', 'MeanIoUMetric'] class PixelAccuracyMetric(EvalMetric): """ Computes the pixel-wise accuracy. Parameters: ---------- axis : int, default -1 The axis that represents classes. name : str, default 'pix_acc' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. on_cpu : bool, default True Calculate on CPU. sparse_label : bool, default True Whether label is an integer array instead of probability distribution. vague_idx : int, default -1 Index of masked pixels. use_vague : bool, default False Whether to use pixel masking. macro_average : bool, default True Whether to use micro or macro averaging. """ def __init__(self, axis=-1, name="pix_acc", output_names=None, label_names=None, on_cpu=True, sparse_label=True, vague_idx=-1, use_vague=False, macro_average=True): self.macro_average = macro_average super(PixelAccuracyMetric, self).__init__( name, axis=axis, output_names=output_names, label_names=label_names) self.axis = axis self.on_cpu = on_cpu self.sparse_label = sparse_label self.vague_idx = vague_idx self.use_vague = use_vague def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. 
""" if self.on_cpu: if self.sparse_label: label_imask = labels.numpy().astype(np.int32) else: label_imask = np.argmax(labels.numpy(), axis=self.axis).astype(np.int32) pred_imask = np.argmax(preds.numpy(), axis=self.axis).astype(np.int32) acc = seg_pixel_accuracy_np( label_imask=label_imask, pred_imask=pred_imask, vague_idx=self.vague_idx, use_vague=self.use_vague, macro_average=self.macro_average) if self.macro_average: self.sum_metric += acc self.num_inst += 1 else: self.sum_metric += acc[0] self.num_inst += acc[1] else: assert False def reset(self): """ Resets the internal evaluation result to initial state. """ if self.macro_average: self.num_inst = 0 self.sum_metric = 0.0 else: self.num_inst = 0 self.sum_metric = 0 def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.macro_average: if self.num_inst == 0: return self.name, float("nan") else: return self.name, self.sum_metric / self.num_inst else: if self.num_inst == 0: return self.name, float("nan") else: return self.name, float(self.sum_metric) / self.num_inst class MeanIoUMetric(EvalMetric): """ Computes the mean intersection over union. Parameters: ---------- axis : int, default -1 The axis that represents classes name : str, default 'mean_iou' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. on_cpu : bool, default True Calculate on CPU. sparse_label : bool, default True Whether label is an integer array instead of probability distribution. num_classes : int Number of classes vague_idx : int, default -1 Index of masked pixels. use_vague : bool, default False Whether to use pixel masking. 
bg_idx : int, default -1 Index of background class. ignore_bg : bool, default False Whether to ignore background class. macro_average : bool, default True Whether to use micro or macro averaging. """ def __init__(self, axis=-1, name="mean_iou", output_names=None, label_names=None, on_cpu=True, sparse_label=True, num_classes=None, vague_idx=-1, use_vague=False, bg_idx=-1, ignore_bg=False, macro_average=True): self.macro_average = macro_average self.num_classes = num_classes self.ignore_bg = ignore_bg super(MeanIoUMetric, self).__init__( name, axis=axis, output_names=output_names, label_names=label_names) assert ((not ignore_bg) or (bg_idx in (0, num_classes - 1))) self.axis = axis self.on_cpu = on_cpu self.sparse_label = sparse_label self.vague_idx = vague_idx self.use_vague = use_vague self.bg_idx = bg_idx assert (on_cpu and sparse_label) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. """ if self.on_cpu: if self.sparse_label: label_imask = labels.numpy().astype(np.int32) else: assert False pred_imask = np.argmax(preds.numpy(), axis=self.axis).astype(np.int32) if self.sparse_label: assert (label_imask.shape[0] == 1) assert (pred_imask.shape[0] == 1) label_imask = np.squeeze(label_imask, axis=0) pred_imask = np.squeeze(pred_imask, axis=0) acc = seg_mean_iou_imasks_np( label_imask=label_imask, pred_imask=pred_imask, num_classes=self.num_classes, vague_idx=self.vague_idx, use_vague=self.use_vague, bg_idx=self.bg_idx, ignore_bg=self.ignore_bg, macro_average=self.macro_average) else: assert False if self.macro_average: self.sum_metric += acc self.num_inst += 1 else: self.area_inter += acc[0] self.area_union += acc[1] else: assert False def reset(self): """ Resets the internal evaluation result to initial state. 
""" if self.macro_average: self.num_inst = 0 self.sum_metric = 0.0 else: class_count = self.num_classes - 1 if self.ignore_bg else self.num_classes self.area_inter = np.zeros((class_count,), np.uint64) self.area_union = np.zeros((class_count,), np.uint64) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.macro_average: if self.num_inst == 0: return self.name, float("nan") else: return self.name, self.sum_metric / self.num_inst else: class_count = (self.area_union > 0).sum() if class_count == 0: return self.name, float("nan") eps = np.finfo(np.float32).eps area_union_eps = self.area_union + eps mean_iou = (self.area_inter / area_union_eps).sum() / class_count return self.name, mean_iou
8,898
32.081784
88
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/cls_metrics.py
""" Evaluation Metrics for Image Classification. """ import tensorflow as tf from .metric import EvalMetric __all__ = ['Top1Error', 'TopKError'] class Accuracy(EvalMetric): """ Computes accuracy classification score. Parameters: ---------- axis : int, default 1 The axis that represents classes name : str, default 'accuracy' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, axis=1, name="accuracy", output_names=None, label_names=None): super(Accuracy, self).__init__( name, axis=axis, output_names=output_names, label_names=label_names, has_global_stats=True) self.axis = axis self.base_acc = tf.keras.metrics.SparseCategoricalAccuracy(name="acc") def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data with class indices as values, one per sample. preds : tensor Prediction values for samples. Each prediction value can either be the class index, or a vector of likelihoods for all classes. """ self.base_acc.update_state(labels, preds) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ return self.name, float(self.base_acc.result().numpy()) class TopKAccuracy(EvalMetric): """ Computes top k predictions accuracy. Parameters: ---------- top_k : int, default 1 Whether targets are in top k predictions. name : str, default 'top_k_accuracy' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. 
label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, top_k=1, name="top_k_accuracy", output_names=None, label_names=None): super(TopKAccuracy, self).__init__( name, top_k=top_k, output_names=output_names, label_names=label_names, has_global_stats=True) self.top_k = top_k assert (self.top_k > 1), "Please use Accuracy if top_k is no more than 1" self.name += "_{:d}".format(self.top_k) self.base_acc = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name="topk_acc") def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. """ self.base_acc.update_state(labels, preds) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ return self.name, float(self.base_acc.result().numpy()) class Top1Error(Accuracy): """ Computes top-1 error (inverted accuracy classification score). Parameters: ---------- axis : int, default 1 The axis that represents classes. name : str, default 'top_1_error' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, axis=1, name="top_1_error", output_names=None, label_names=None): super(Top1Error, self).__init__( axis=axis, name=name, output_names=output_names, label_names=label_names) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. 
""" return self.name, 1.0 - float(self.base_acc.result().numpy()) class TopKError(TopKAccuracy): """ Computes top-k error (inverted top k predictions accuracy). Parameters: ---------- top_k : int Whether targets are out of top k predictions, default 1 name : str, default 'top_k_error' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, top_k=1, name="top_k_error", output_names=None, label_names=None): name_ = name super(TopKError, self).__init__( top_k=top_k, name=name, output_names=output_names, label_names=label_names) self.name = name_.replace("_k_", "_{}_".format(top_k)) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ return self.name, 1.0 - float(self.base_acc.result().numpy())
6,552
29.621495
95
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/det_metrics.py
""" Evaluation Metrics for Object Detection. """ import warnings import numpy as np import mxnet as mx __all__ = ['CocoDetMApMetric'] class CocoDetMApMetric(mx.metric.EvalMetric): """ Detection metric for COCO bbox task. Parameters: ---------- img_height : int Processed image height. coco_annotations_file_path : str COCO anotation file path. contiguous_id_to_json : list of int Processed IDs. validation_ids : bool, default False Whether to use temporary file for estimation. use_file : bool, default False Whether to use temporary file for estimation. score_thresh : float, default 0.05 Detection results with confident scores smaller than `score_thresh` will be discarded before saving to results. data_shape : tuple of int, default is None If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions. This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must be fixed for all validation images. post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h) If not None, the bounding boxes will be affine transformed rather than simply scaled. name : str, default 'mAP' Name of this metric instance for display. 
""" def __init__(self, img_height, coco_annotations_file_path, contiguous_id_to_json, validation_ids=None, use_file=False, score_thresh=0.05, data_shape=None, post_affine=None, name="mAP"): super(CocoDetMApMetric, self).__init__(name=name) self.img_height = img_height self.coco_annotations_file_path = coco_annotations_file_path self.contiguous_id_to_json = contiguous_id_to_json self.validation_ids = validation_ids self.use_file = use_file self.score_thresh = score_thresh self.current_idx = 0 self.coco_result = [] if isinstance(data_shape, (tuple, list)): assert len(data_shape) == 2, "Data shape must be (height, width)" elif not data_shape: data_shape = None else: raise ValueError("data_shape must be None or tuple of int as (height, width)") self._data_shape = data_shape if post_affine is not None: assert self._data_shape is not None, "Using post affine transform requires data_shape" self._post_affine = post_affine else: self._post_affine = None from pycocotools.coco import COCO self.gt = COCO(self.coco_annotations_file_path) self._img_ids = sorted(self.gt.getImgIds()) def reset(self): self.current_idx = 0 self.coco_result = [] def get(self): """ Get evaluation metrics. """ if self.current_idx != len(self._img_ids): warnings.warn("Recorded {} out of {} validation images, incomplete results".format( self.current_idx, len(self._img_ids))) from pycocotools.coco import COCO gt = COCO(self.coco_annotations_file_path) import tempfile import json with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f: json.dump(self.coco_result, f) f.flush() pred = gt.loadRes(f.name) from pycocotools.cocoeval import COCOeval coco_eval = COCOeval(gt, pred, "bbox") if self.validation_ids is not None: coco_eval.params.imgIds = self.validation_ids coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return self.name, tuple(coco_eval.stats[:3]) def update2(self, pred_bboxes, pred_labels, pred_scores): """ Update internal buffer with latest predictions. 
Note that the statistics are not available until you call self.get() to return the metrics. Parameters: ---------- pred_bboxes : mxnet.NDArray or numpy.ndarray Prediction bounding boxes with shape `B, N, 4`. Where B is the size of mini-batch, N is the number of bboxes. pred_labels : mxnet.NDArray or numpy.ndarray Prediction bounding boxes labels with shape `B, N`. pred_scores : mxnet.NDArray or numpy.ndarray Prediction bounding boxes scores with shape `B, N`. """ def as_numpy(a): """ Convert a (list of) mx.NDArray into numpy.ndarray """ if isinstance(a, (list, tuple)): out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a] return np.concatenate(out, axis=0) elif isinstance(a, mx.nd.NDArray): a = a.asnumpy() return a for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]): valid_pred = np.where(pred_label.flat >= 0)[0] pred_bbox = pred_bbox[valid_pred, :].astype(np.float) pred_label = pred_label.flat[valid_pred].astype(int) pred_score = pred_score.flat[valid_pred].astype(np.float) imgid = self._img_ids[self.current_idx] self.current_idx += 1 affine_mat = None if self._data_shape is not None: entry = self.gt.loadImgs(imgid)[0] orig_height = entry["height"] orig_width = entry["width"] height_scale = float(orig_height) / self._data_shape[0] width_scale = float(orig_width) / self._data_shape[1] if self._post_affine is not None: affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0]) else: height_scale, width_scale = (1.0, 1.0) # for each bbox detection in each image for bbox, label, score in zip(pred_bbox, pred_label, pred_score): if label not in self.contiguous_id_to_json: # ignore non-exist class continue if score < self.score_thresh: continue category_id = self.contiguous_id_to_json[label] # rescale bboxes/affine transform bboxes if affine_mat is not None: bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat) bbox[2:4] = self.affine_transform(bbox[2:4], 
affine_mat) else: bbox[[0, 2]] *= width_scale bbox[[1, 3]] *= height_scale # convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h] bbox[2:4] -= (bbox[:2] - 1) self.coco_result.append({"image_id": imgid, "category_id": category_id, "bbox": bbox[:4].tolist(), "score": score}) def update(self, labels, preds): det_bboxes = [] det_ids = [] det_scores = [] for x_rr, y in zip(preds, labels): bboxes = x_rr.slice_axis(axis=-1, begin=0, end=4) ids = x_rr.slice_axis(axis=-1, begin=4, end=5).squeeze(axis=2) scores = x_rr.slice_axis(axis=-1, begin=5, end=6).squeeze(axis=2) det_ids.append(ids) det_scores.append(scores) # clip to image size det_bboxes.append(bboxes.clip(0, self.img_height)) self.update2(det_bboxes, det_ids, det_scores) @staticmethod def affine_transform(pt, t): """ Apply affine transform to a bounding box given transform matrix t. Parameters: ---------- pt : numpy.ndarray Bounding box with shape (1, 2). t : numpy.ndarray Transformation matrix with shape (2, 3). Returns: ------- numpy.ndarray New bounding box with shape (1, 2). """ new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2]
8,392
38.219626
119
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/hpe_metrics.py
""" Evaluation Metrics for Human Pose Estimation. """ from .metric import EvalMetric __all__ = ['CocoHpeOksApMetric'] class CocoHpeOksApMetric(EvalMetric): """ Detection metric for COCO bbox task. Parameters: ---------- coco_annotations_file_path : str COCO anotation file path. pose_postprocessing_fn : func An function for pose post-processing. use_file : bool, default False Whether to use temporary file for estimation. validation_ids : bool, default False Whether to use temporary file for estimation. name : str, default 'CocoOksAp' Name of this metric instance for display. """ def __init__(self, coco_annotations_file_path, pose_postprocessing_fn, validation_ids=None, use_file=False, name="CocoOksAp"): super(CocoHpeOksApMetric, self).__init__(name=name) self.coco_annotations_file_path = coco_annotations_file_path self.pose_postprocessing_fn = pose_postprocessing_fn self.validation_ids = validation_ids self.use_file = use_file self.coco_result = [] def reset(self): self.coco_result = [] def get(self): """ Get evaluation metrics. 
""" import copy from pycocotools.coco import COCO gt = COCO(self.coco_annotations_file_path) if self.use_file: import tempfile import json with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f: json.dump(self.coco_result, f) f.flush() pred = gt.loadRes(f.name) else: def calc_pred(coco, anns): import numpy as np import copy pred = COCO() pred.dataset["images"] = [img for img in coco.dataset["images"]] annsImgIds = [ann["image_id"] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(coco.getImgIds())) pred.dataset["categories"] = copy.deepcopy(coco.dataset["categories"]) for id, ann in enumerate(anns): s = ann["keypoints"] x = s[0::3] y = s[1::3] x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann["area"] = (x1 - x0) * (y1 - y0) ann["id"] = id + 1 ann["bbox"] = [x0, y0, x1 - x0, y1 - y0] pred.dataset["annotations"] = anns pred.createIndex() return pred pred = calc_pred(gt, copy.deepcopy(self.coco_result)) from pycocotools.cocoeval import COCOeval coco_eval = COCOeval(gt, pred, "keypoints") if self.validation_ids is not None: coco_eval.params.imgIds = self.validation_ids coco_eval.params.useSegm = None coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return self.name, tuple(coco_eval.stats[:3]) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. """ label = labels.numpy() pred = preds.numpy() pred_pts_score, pred_person_score, label_img_id = self.pose_postprocessing_fn(pred, label) for idx in range(len(pred_pts_score)): image_id = int(label_img_id[idx]) kpt = pred_pts_score[idx].flatten().tolist() score = float(pred_person_score[idx]) self.coco_result.append({ "image_id": image_id, "category_id": 1, "keypoints": kpt, "score": score})
3,920
31.675
98
py
imgclsmob
imgclsmob-master/tensorflow2/metrics/metric.py
""" Several base metrics. """ __all__ = ['EvalMetric', 'CompositeEvalMetric', 'check_label_shapes'] from collections import OrderedDict def check_label_shapes(labels, preds, shape=False): """ Helper function for checking shape of label and prediction. Parameters: ---------- labels : list of tensor The labels of the data. preds : list of tensor Predicted values. shape : boolean If True, check the shape of labels and preds, otherwise only check their length. """ if not shape: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of predictions {}".format(label_shape, pred_shape)) class EvalMetric(object): """ Base class for all evaluation metrics. Parameters: ---------- name : str Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, name, output_names=None, label_names=None, **kwargs): super(EvalMetric, self).__init__() self.name = str(name) self.output_names = output_names self.label_names = label_names self._has_global_stats = kwargs.pop("has_global_stats", False) self._kwargs = kwargs self.reset() def __str__(self): return "EvalMetric: {}".format(dict(self.get_name_value())) def get_config(self): """ Save configurations of metric. Can be recreated from configs with metric.create(**config). """ config = self._kwargs.copy() config.update({ "metric": self.__class__.__name__, "name": self.name, "output_names": self.output_names, "label_names": self.label_names}) return config def update_dict(self, label, pred): """ Update the internal evaluation with named label and pred. 
Parameters: ---------- labels : OrderedDict of str -> tensor name to array mapping for labels. preds : OrderedDict of str -> tensor name to array mapping of predicted outputs. """ if self.output_names is not None: pred = [pred[name] for name in self.output_names] else: pred = list(pred.values()) if self.label_names is not None: label = [label[name] for name in self.label_names] else: label = list(label.values()) self.update(label, pred) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. """ raise NotImplementedError() def reset(self): """ Resets the internal evaluation result to initial state. """ self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0 def reset_local(self): """ Resets the local portion of the internal evaluation results to initial state. """ self.num_inst = 0 self.sum_metric = 0.0 def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.num_inst == 0: return self.name, float("nan") else: return self.name, self.sum_metric / self.num_inst def get_global(self): """ Gets the current global evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self._has_global_stats: if self.global_num_inst == 0: return self.name, float("nan") else: return self.name, self.global_sum_metric / self.global_num_inst else: return self.get() def get_name_value(self): """ Returns zipped name and value pairs. Returns: ------- list of tuples A (name, value) tuple list. """ name, value = self.get() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) def get_global_name_value(self): """ Returns zipped name and value pairs for global results. 
Returns: ------- list of tuples A (name, value) tuple list. """ if self._has_global_stats: name, value = self.get_global() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) else: return self.get_name_value() class CompositeEvalMetric(EvalMetric): """ Manages multiple evaluation metrics. Parameters: ---------- name : str, default 'composite' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, name="composite", output_names=None, label_names=None): super(CompositeEvalMetric, self).__init__( name, output_names=output_names, label_names=label_names, has_global_stats=True) self.metrics = [] def add(self, metric): """ Adds a child metric. Parameters: ---------- metric A metric instance. """ self.metrics.append(metric) def update_dict(self, labels, preds): if self.label_names is not None: labels = OrderedDict([i for i in labels.items() if i[0] in self.label_names]) if self.output_names is not None: preds = OrderedDict([i for i in preds.items() if i[0] in self.output_names]) for metric in self.metrics: metric.update_dict(labels, preds) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : tensor The labels of the data. preds : tensor Predicted values. """ for metric in self.metrics: metric.update(labels, preds) def reset(self): """ Resets the internal evaluation result to initial state. """ try: for metric in self.metrics: metric.reset() except AttributeError: pass def reset_local(self): """ Resets the local portion of the internal evaluation results to initial state. 
""" try: for metric in self.metrics: metric.reset_local() except AttributeError: pass def get(self): """ Returns the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ names = [] values = [] for metric in self.metrics: name, value = metric.get() name = [name] value = [value] names.extend(name) values.extend(value) return names, values def get_global(self): """ Returns the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ names = [] values = [] for metric in self.metrics: name, value = metric.get_global() name = [name] value = [value] names.extend(name) values.extend(value) return names, values def get_config(self): config = super(CompositeEvalMetric, self).get_config() config.update({"metrics": [i.get_config() for i in self.metrics]}) return config
9,241
27.176829
117
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/imagenet1k_cls_dataset.py
""" ImageNet-1K classification dataset. """ __all__ = ['ImageNet1KMetaInfo', 'load_image_imagenet1k_val'] import os import math import cv2 import numpy as np from PIL import Image from tensorflow.keras.preprocessing.image import ImageDataGenerator import keras_preprocessing as keras_prep from .dataset_metainfo import DatasetMetaInfo from .cls_dataset import img_normalization class ImageNet1KMetaInfo(DatasetMetaInfo): """ Descriptor of ImageNet-1K dataset. """ def __init__(self): super(ImageNet1KMetaInfo, self).__init__() self.label = "ImageNet1K" self.short_label = "imagenet" self.root_dir_name = "imagenet" self.dataset_class = None self.num_training_samples = None self.in_channels = 3 self.num_classes = 1000 self.input_image_size = (224, 224) self.resize_inv_factor = 0.875 self.train_metric_capts = ["Train.Top1"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err-top1"}] self.val_metric_capts = ["Val.Top1", "Val.Top5"] self.val_metric_names = ["Top1Error", "TopKError"] self.val_metric_extra_kwargs = [{"name": "err-top1"}, {"name": "err-top5", "top_k": 5}] self.saver_acc_ind = 1 self.train_transform = imagenet_train_transform self.val_transform = imagenet_val_transform self.test_transform = imagenet_val_transform self.train_generator = imagenet_train_generator self.val_generator = imagenet_val_generator self.test_generator = imagenet_val_generator self.ml_type = "imgcls" self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.interpolation = "bilinear" self.interpolation_msg = "bilinear" def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. 
""" super(ImageNet1KMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, default=self.input_image_size[0], help="size of the input for model") parser.add_argument( "--resize-inv-factor", type=float, default=self.resize_inv_factor, help="inverted ratio for input image crop") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=self.mean_rgb, help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=self.std_rgb, help="STD of RGB channels in the dataset") parser.add_argument( "--interpolation", type=str, default=self.interpolation, help="Preprocessing interpolation") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(ImageNet1KMetaInfo, self).update(args) self.input_image_size = (args.input_size, args.input_size) self.mean_rgb = args.mean_rgb self.std_rgb = args.std_rgb self.interpolation = args.interpolation if self.interpolation == "nearest": self.interpolation_msg = self.interpolation else: self.interpolation_msg = "{}:{}".format(self.interpolation, self.resize_inv_factor) import keras_preprocessing as keras_prep keras_prep.image.iterator.load_img = load_image_imagenet1k_val def resize(img, size, interpolation): """ Resize the input PIL Image to the given size via OpenCV. Parameters: ---------- img : PIL.Image input image. size : int or tuple of (W, H) Size of output image. interpolation : int Interpolation method for resizing. Returns: ------- PIL.Image Resulted image. 
""" if interpolation == Image.NEAREST: cv_interpolation = cv2.INTER_NEAREST elif interpolation == Image.BILINEAR: cv_interpolation = cv2.INTER_LINEAR elif interpolation == Image.BICUBIC: cv_interpolation = cv2.INTER_CUBIC elif interpolation == Image.LANCZOS: cv_interpolation = cv2.INTER_LANCZOS4 else: raise ValueError("Invalid interpolation method: {}", interpolation) cv_img = np.array(img) if isinstance(size, int): w, h = img.size if (w <= h and w == size) or (h <= w and h == size): return img if w < h: out_size = (size, int(size * h / w)) else: out_size = (int(size * w / h), size) cv_img = cv2.resize(cv_img, dsize=out_size, interpolation=cv_interpolation) return Image.fromarray(cv_img) else: cv_img = cv2.resize(cv_img, dsize=size, interpolation=cv_interpolation) return Image.fromarray(cv_img) def center_crop(img, output_size): """ Crop the given PIL Image. Parameters: ---------- img : PIL.Image input image. output_size : tuple of (W, H) Size of output image. Returns: ------- PIL.Image Resulted image. """ if isinstance(output_size, int): output_size = (int(output_size), int(output_size)) w, h = img.size th, tw = output_size i = int(round((h - th) / 2.)) j = int(round((w - tw) / 2.)) return img.crop((j, i, j + tw, i + th)) def load_image_imagenet1k_val(path, grayscale=False, color_mode="rgb", target_size=None, interpolation="nearest"): """ Wraps keras_preprocessing.image.utils.load_img and apply center crop as in ImageNet-1K validation procedure. # Arguments path: Path to image file. color_mode: One of "grayscale", 'rgb', 'rgba'. Default: 'rgb'. The desired image format. target_size: Either `None` (default to original size) or tuple of ints `(img_height, img_width)`. interpolation: Interpolation and crop methods used to resample and crop the image if the target size is different from that of the loaded image. Methods are delimited by ":" where first part is interpolation and second is an inverted ratio for input image crop, e.g. 'lanczos:0.875'. 
Supported interpolation methods are 'nearest', 'bilinear', 'bicubic', 'lanczos', 'box', 'hamming' By default, 'nearest' is used. # Returns: A PIL Image instance. # Raises ImportError: if PIL is not available. ValueError: if interpolation method is not supported. """ interpolation, resize_inv_factor = interpolation.split(":") if ":" in interpolation else (interpolation, "none") if resize_inv_factor == "none": return keras_prep.image.utils.load_img( path=path, grayscale=grayscale, color_mode=color_mode, target_size=target_size, interpolation=interpolation) img = keras_prep.image.utils.load_img( path=path, grayscale=grayscale, color_mode=color_mode, target_size=None, interpolation=interpolation) if (target_size is None) or (img.size == (target_size[1], target_size[0])): return img try: resize_inv_factor = float(resize_inv_factor) except ValueError: raise ValueError("Invalid crop inverted ratio: {}", resize_inv_factor) if interpolation not in keras_prep.image.utils._PIL_INTERPOLATION_METHODS: raise ValueError("Invalid interpolation method {} specified. Supported methods are {}".format( interpolation, ", ".join(keras_prep.image.utils._PIL_INTERPOLATION_METHODS.keys()))) resample = keras_prep.image.utils._PIL_INTERPOLATION_METHODS[interpolation] resize_value = int(math.ceil(float(target_size[0]) / resize_inv_factor)) img = resize( img=img, size=resize_value, interpolation=resample) return center_crop( img=img, output_size=target_size) def imagenet_train_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for training subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. 
""" data_generator = ImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), shear_range=0.2, zoom_range=0.2, horizontal_flip=True, data_format=data_format) return data_generator def imagenet_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = ImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), data_format=data_format) return data_generator def imagenet_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "train" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation=ds_metainfo.interpolation_msg) return generator def imagenet_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. 
""" split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation=ds_metainfo.interpolation_msg) return generator
11,999
30.496063
116
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/coco_hpe1_dataset.py
""" COCO keypoint detection (2D single human pose estimation) dataset. """ import os import threading import copy import cv2 import numpy as np from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator from .dataset_metainfo import DatasetMetaInfo class CocoHpe1Dataset(object): """ COCO keypoint detection (2D single human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. splits : list of str, default ['person_keypoints_val2017'] Json annotations name. Candidates can be: person_keypoints_val2017, person_keypoints_train2017. check_centers : bool, default is False If true, will force check centers of bbox and keypoints, respectively. If centers are far away from each other, remove this label. skip_empty : bool, default is False Whether skip entire image if no valid label is found. Use `False` if this dataset is for validation to avoid COCO metric error. 
""" CLASSES = ["person"] KEYPOINTS = { 0: "nose", 1: "left_eye", 2: "right_eye", 3: "left_ear", 4: "right_ear", 5: "left_shoulder", 6: "right_shoulder", 7: "left_elbow", 8: "right_elbow", 9: "left_wrist", 10: "right_wrist", 11: "left_hip", 12: "right_hip", 13: "left_knee", 14: "right_knee", 15: "left_ankle", 16: "right_ankle" } SKELETON = [ [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] def __init__(self, root, mode="train", transform=None, splits=("person_keypoints_val2017",), check_centers=False, skip_empty=True): self._root = os.path.expanduser(root) self.mode = mode self.transform = transform self.num_class = len(self.CLASSES) if isinstance(splits, str): splits = [splits] self._splits = splits self._coco = [] self._check_centers = check_centers self._skip_empty = skip_empty self.index_map = dict(zip(type(self).CLASSES, range(self.num_class))) self.json_id_to_contiguous = None self.contiguous_id_to_json = None self._items, self._labels = self._load_jsons() mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") self.annotations_file_path = annotations_file_path def __str__(self): detail = ",".join([str(s) for s in self._splits]) return self.__class__.__name__ + "(" + detail + ")" @property def classes(self): """ Category names. """ return type(self).CLASSES @property def num_joints(self): """ Dataset defined: number of joints provided. """ return 17 @property def joint_pairs(self): """ Joint pairs which defines the pairs of joint to be swapped when the image is flipped horizontally. """ return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]] @property def coco(self): """ Return pycocotools object for evaluation purposes. 
""" if not self._coco: raise ValueError("No coco objects found, dataset not initialized.") if len(self._coco) > 1: raise NotImplementedError( "Currently we don't support evaluating {} JSON files".format(len(self._coco))) return self._coco[0] def __len__(self): return len(self._items) def __getitem__(self, idx): img_path = self._items[idx] img_id = int(os.path.splitext(os.path.basename(img_path))[0]) label = copy.deepcopy(self._labels[idx]) # img = mx.image.imread(img_path, 1) img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR) img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) if self.transform is not None: img, scale, center, score = self.transform(img, label) # print("center={}".format(center)) # print("scale={}".format(scale)) res_label = np.array([float(img_id)] + [float(score)] + list(center) + list(scale), np.float32) return img, res_label def _load_jsons(self): """ Load all image paths and labels from JSON annotation files into buffer. """ items = [] labels = [] from pycocotools.coco import COCO for split in self._splits: anno = os.path.join(self._root, "annotations", split) + ".json" _coco = COCO(anno) self._coco.append(_coco) classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())] if not classes == self.classes: raise ValueError("Incompatible category names with COCO: ") assert classes == self.classes json_id_to_contiguous = { v: k for k, v in enumerate(_coco.getCatIds())} if self.json_id_to_contiguous is None: self.json_id_to_contiguous = json_id_to_contiguous self.contiguous_id_to_json = { v: k for k, v in self.json_id_to_contiguous.items()} else: assert self.json_id_to_contiguous == json_id_to_contiguous # iterate through the annotations image_ids = sorted(_coco.getImgIds()) for entry in _coco.loadImgs(image_ids): dirname, filename = entry["coco_url"].split("/")[-2:] abs_path = os.path.join(self._root, dirname, filename) if not os.path.exists(abs_path): raise IOError("Image: {} not exists.".format(abs_path)) label = 
self._check_load_keypoints(_coco, entry) if not label: continue # num of items are relative to person, not image for obj in label: items.append(abs_path) labels.append(obj) return items, labels def _check_load_keypoints(self, coco, entry): """ Check and load ground-truth keypoints. """ ann_ids = coco.getAnnIds(imgIds=entry["id"], iscrowd=False) objs = coco.loadAnns(ann_ids) # check valid bboxes valid_objs = [] width = entry["width"] height = entry["height"] for obj in objs: contiguous_cid = self.json_id_to_contiguous[obj["category_id"]] if contiguous_cid >= self.num_class: # not class of interest continue if max(obj["keypoints"]) == 0: continue # convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height) # require non-zero box area if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin: continue # joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32) for i in range(self.num_joints): joints_3d[i, 0, 0] = obj["keypoints"][i * 3 + 0] joints_3d[i, 1, 0] = obj["keypoints"][i * 3 + 1] # joints_3d[i, 2, 0] = 0 visible = min(1, obj["keypoints"][i * 3 + 2]) joints_3d[i, :2, 1] = visible # joints_3d[i, 2, 1] = 0 if np.sum(joints_3d[:, 0, 1]) < 1: # no visible keypoint continue if self._check_centers: bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax)) kp_center, num_vis = self._get_keypoints_center_count(joints_3d) ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area) if (num_vis / 80.0 + 47 / 80.0) > ks: continue valid_objs.append({ "bbox": (xmin, ymin, xmax, ymax), "joints_3d": joints_3d }) if not valid_objs: if not self._skip_empty: # dummy invalid labels if no valid objects are found valid_objs.append({ "bbox": np.array([-1, -1, 0, 0]), "joints_3d": np.zeros((self.num_joints, 3, 2), dtype=np.float32) }) return valid_objs @staticmethod def 
_get_box_center_area(bbox): """ Get bbox center. """ c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0]) area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) return c, area @staticmethod def _get_keypoints_center_count(keypoints): """ Get geometric center of all keypoints. """ keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0)) keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0)) num = float(np.sum(keypoints[:, 0, 1])) return np.array([keypoint_x / num, keypoint_y / num]), num @staticmethod def bbox_clip_xyxy(xyxy, width, height): """ Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary. All bounding boxes will be clipped to the new region `(0, 0, width, height)`. Parameters: ---------- xyxy : list, tuple or numpy.ndarray The bbox in format (xmin, ymin, xmax, ymax). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. width : int or float Boundary width. height : int or float Boundary height. Returns: ------- tuple or np.array Description of returned object. 
""" if isinstance(xyxy, (tuple, list)): if not len(xyxy) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy))) x1 = np.minimum(width - 1, np.maximum(0, xyxy[0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[3])) return x1, y1, x2, y2 elif isinstance(xyxy, np.ndarray): if not xyxy.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape)) x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3])) return np.hstack((x1, y1, x2, y2)) else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy))) @staticmethod def bbox_xywh_to_xyxy(xywh): """ Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax) Parameters: ---------- xywh : list, tuple or numpy.ndarray The bbox in format (x, y, w, h). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. Returns: ------- tuple or np.ndarray The converted bboxes in format (xmin, ymin, xmax, ymax). If input is numpy.ndarray, return is numpy.ndarray correspondingly. 
""" if isinstance(xywh, (tuple, list)): if not len(xywh) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh))) w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0) return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h elif isinstance(xywh, np.ndarray): if not xywh.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape)) xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1))) return xyxy else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh))) # --------------------------------------------------------------------------------------------------------------------- class CocoHpeValTransform1(object): def __init__(self, ds_metainfo): # print("ds_metainfo.mean_rgb={}".format(ds_metainfo.mean_rgb)) # print("ds_metainfo.std_rgb={}".format(ds_metainfo.std_rgb)) self.ds_metainfo = ds_metainfo self.image_size = self.ds_metainfo.input_image_size height = self.image_size[0] width = self.image_size[1] self.aspect_ratio = float(width / height) self.mean = ds_metainfo.mean_rgb self.std = ds_metainfo.std_rgb def __call__(self, src, label): bbox = label["bbox"] assert len(bbox) == 4 xmin, ymin, xmax, ymax = bbox center, scale = _box_to_center_scale(xmin, ymin, xmax - xmin, ymax - ymin, self.aspect_ratio) score = label.get("score", 1) h, w = self.image_size trans = get_affine_transform(center, scale, 0, [w, h]) img = cv2.warpAffine(src, trans, (int(w), int(h)), flags=cv2.INTER_LINEAR) # img = mx.nd.image.to_tensor(mx.nd.array(img)) # img = mx.nd.image.normalize(img, mean=self.mean, std=self.std) img = img.astype(np.float32) img = img / 255.0 img = (img - np.array(self.mean, np.float32)) / np.array(self.std, np.float32) return img, scale, center, score def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25): pixel_std = 1 center = np.zeros((2,), dtype=np.float32) center[0] = x + w * 0.5 center[1] = y + h * 0.5 if w 
> aspect_ratio * h: h = w / aspect_ratio elif w < aspect_ratio * h: w = h * aspect_ratio scale = np.array( [w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32) if center[0] != -1: scale = scale * scale_mult return center, scale def get_dir(src_point, rot_rad): sn, cs = np.sin(rot_rad), np.cos(rot_rad) src_result = [0, 0] src_result[0] = src_point[0] * cs - src_point[1] * sn src_result[1] = src_point[0] * sn + src_point[1] * cs return src_result def crop(img, center, scale, output_size, rot=0): trans = get_affine_transform(center, scale, rot, output_size) dst_img = cv2.warpAffine( img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img def get_3rd_point(a, b): direct = a - b return b + np.array([-direct[1], direct[0]], dtype=np.float32) def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0): if not isinstance(scale, np.ndarray) and not isinstance(scale, list): scale = np.array([scale, scale]) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = np.pi * rot / 180 src_dir = get_dir([0, src_w * -0.5], rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = center + scale_tmp * shift src[1, :] = center + src_dir + scale_tmp * shift dst[0, :] = [dst_w * 0.5, dst_h * 0.5] dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans # --------------------------------------------------------------------------------------------------------------------- class CocoHpeValTransform2(object): def __init__(self, ds_metainfo): # print("ds_metainfo.mean_rgb={}".format(ds_metainfo.mean_rgb)) # 
print("ds_metainfo.std_rgb={}".format(ds_metainfo.std_rgb)) self.ds_metainfo = ds_metainfo self.image_size = self.ds_metainfo.input_image_size height = self.image_size[0] width = self.image_size[1] self.aspect_ratio = float(width / height) self.mean = ds_metainfo.mean_rgb self.std = ds_metainfo.std_rgb def __call__(self, src, label): # print(src.shape) bbox = label["bbox"] assert len(bbox) == 4 score = label.get('score', 1) img, scale_box = detector_to_alpha_pose( src, class_ids=np.array([[0.]]), scores=np.array([[1.]]), bounding_boxs=np.array(np.array([bbox])), output_shape=self.image_size) if scale_box.shape[0] == 1: pt1 = np.array(scale_box[0, (0, 1)], dtype=np.float32) pt2 = np.array(scale_box[0, (2, 3)], dtype=np.float32) else: assert scale_box.shape[0] == 4 pt1 = np.array(scale_box[(0, 1)], dtype=np.float32) pt2 = np.array(scale_box[(2, 3)], dtype=np.float32) res_img = img[0].astype(np.float32) res_img = res_img.transpose((1, 2, 0)) return res_img, pt1, pt2, score def detector_to_alpha_pose(img, class_ids, scores, bounding_boxs, output_shape=(256, 192), thr=0.5): boxes, scores = alpha_pose_detection_processor( img=img, boxes=bounding_boxs, class_idxs=class_ids, scores=scores, thr=thr) pose_input, upscale_bbox = alpha_pose_image_cropper( source_img=img, boxes=boxes, output_shape=output_shape) return pose_input, upscale_bbox def alpha_pose_detection_processor(img, boxes, class_idxs, scores, thr=0.5): if len(boxes.shape) == 3: boxes = boxes.squeeze(axis=0) if len(class_idxs.shape) == 3: class_idxs = class_idxs.squeeze(axis=0) if len(scores.shape) == 3: scores = scores.squeeze(axis=0) # cilp coordinates boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0., img.shape[1] - 1) boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0., img.shape[0] - 1) # select boxes mask1 = (class_idxs == 0).astype(np.int32) mask2 = (scores > thr).astype(np.int32) picked_idxs = np.where((mask1 + mask2) > 1)[0] if picked_idxs.shape[0] == 0: return None, None else: return boxes[picked_idxs], 
scores[picked_idxs] def alpha_pose_image_cropper(source_img, boxes, output_shape=(256, 192)): if boxes is None: return None, boxes # crop person poses img_width, img_height = source_img.shape[1], source_img.shape[0] tensors = np.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]]) out_boxes = np.zeros([boxes.shape[0], 4]) for i, box in enumerate(boxes): img = source_img.copy() box_width = box[2] - box[0] box_height = box[3] - box[1] if box_width > 100: scale_rate = 0.2 else: scale_rate = 0.3 # crop image left = int(max(0, box[0] - box_width * scale_rate / 2)) up = int(max(0, box[1] - box_height * scale_rate / 2)) right = int(min(img_width - 1, max(left + 5, box[2] + box_width * scale_rate / 2))) bottom = int(min(img_height - 1, max(up + 5, box[3] + box_height * scale_rate / 2))) crop_width = right - left if crop_width < 1: continue crop_height = bottom - up if crop_height < 1: continue ul = np.array((left, up)) br = np.array((right, bottom)) img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1]) img = img.astype(np.float32) img = img / 255.0 img = img.transpose((2, 0, 1)) # img = mx.nd.image.to_tensor(np.array(img)) # img = img.transpose((2, 0, 1)) img[0] = img[0] - 0.406 img[1] = img[1] - 0.457 img[2] = img[2] - 0.480 assert (img.shape[0] == 3) tensors[i] = img out_boxes[i] = (left, up, right, bottom) return tensors, out_boxes def cv_cropBox(img, ul, br, resH, resW, pad_val=0): ul = ul br = (br - 1) # br = br.int() lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW) lenW = lenH * resW / resH if img.ndim == 2: img = img[:, np.newaxis] box_shape = [br[1] - ul[1], br[0] - ul[0]] pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2] # Padding Zeros img[:ul[1], :, :], img[:, :ul[0], :] = pad_val, pad_val img[br[1] + 1:, :, :], img[:, br[0] + 1:, :] = pad_val, pad_val src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], 
np.float32) src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32) dst[0, :] = 0 dst[1, :] = np.array([resW - 1, resH - 1], np.float32) src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR) return dst_img # --------------------------------------------------------------------------------------------------------------------- def recalc_pose1(keypoints, bbs, image_size): def transform_preds(coords, center, scale, output_size): def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.]).T new_pt = np.dot(t, new_pt) return new_pt[:2] target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, inv=1) for p in range(coords.shape[0]): target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) return target_coords center = bbs[:, :2] scale = bbs[:, 2:4] heatmap_height = image_size[0] // 4 heatmap_width = image_size[1] // 4 output_size = [heatmap_width, heatmap_height] preds = np.zeros_like(keypoints) for i in range(keypoints.shape[0]): preds[i] = transform_preds(keypoints[i], center[i], scale[i], output_size) return preds def recalc_pose1b(pred, label, image_size, visible_conf_threshold=0.0): label_img_id = label[:, 0].astype(np.int32) label_score = label[:, 1] label_bbs = label[:, 2:6] pred_keypoints = pred[:, :, :2] pred_score = pred[:, :, 2] pred[:, :, :2] = recalc_pose1(pred_keypoints, label_bbs, image_size) pred_person_score = [] batch = pred_keypoints.shape[0] num_joints = pred_keypoints.shape[1] for idx in range(batch): kpt_score = 0 count = 0 for i in range(num_joints): mval = float(pred_score[idx][i]) if mval > visible_conf_threshold: kpt_score += mval count += 1 if count > 0: kpt_score /= count kpt_score = kpt_score * float(label_score[idx]) pred_person_score.append(kpt_score) return pred, pred_person_score, 
label_img_id def recalc_pose2(keypoints, bbs, image_size): def transformBoxInvert(pt, ul, br, resH, resW): center = np.zeros(2) center[0] = (br[0] - 1 - ul[0]) / 2 center[1] = (br[1] - 1 - ul[1]) / 2 lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW) lenW = lenH * resW / resH _pt = (pt * lenH) / resH if bool(((lenW - 1) / 2 - center[0]) > 0): _pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0]) if bool(((lenH - 1) / 2 - center[1]) > 0): _pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1]) new_point = np.zeros(2) new_point[0] = _pt[0] + ul[0] new_point[1] = _pt[1] + ul[1] return new_point pt2 = bbs[:, :2] pt1 = bbs[:, 2:4] heatmap_height = image_size[0] // 4 heatmap_width = image_size[1] // 4 preds = np.zeros_like(keypoints) for i in range(keypoints.shape[0]): for j in range(keypoints.shape[1]): preds[i, j] = transformBoxInvert(keypoints[i, j], pt1[i], pt2[i], heatmap_height, heatmap_width) return preds def recalc_pose2b(pred, label, image_size, visible_conf_threshold=0.0): label_img_id = label[:, 0].astype(np.int32) label_score = label[:, 1] label_bbs = label[:, 2:6] pred_keypoints = pred[:, :, :2] pred_score = pred[:, :, 2] pred[:, :, :2] = recalc_pose2(pred_keypoints, label_bbs, image_size) pred_person_score = [] batch = pred_keypoints.shape[0] num_joints = pred_keypoints.shape[1] for idx in range(batch): kpt_score = 0 count = 0 for i in range(num_joints): mval = float(pred_score[idx][i]) if mval > visible_conf_threshold: kpt_score += mval count += 1 if count > 0: kpt_score /= count kpt_score = kpt_score * float(label_score[idx]) pred_person_score.append(kpt_score) return pred, pred_person_score, label_img_id # --------------------------------------------------------------------------------------------------------------------- class CocoHpe1MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe1MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = CocoHpe1Dataset self.num_training_samples = None 
self.in_channels = 3 self.num_classes = CocoHpe1Dataset.classes self.input_image_size = (256, 192) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.CocoOksAp"] self.test_metric_names = ["CocoHpeOksApMetric"] self.test_metric_extra_kwargs = [ {"name": "OksAp", "coco_annotations_file_path": None, "use_file": False, "pose_postprocessing_fn": lambda x, y: recalc_pose1b(x, y, self.input_image_size)}] self.saver_acc_ind = 0 self.do_transform = True self.test_transform = cocohpe_val_transform self.test_transform2 = CocoHpeValTransform1 self.test_generator = cocohpe_test_generator self.ml_type = "hpe" self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.model_type = 1 def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(CocoHpe1MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=self.mean_rgb, help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=self.std_rgb, help="STD of RGB channels in the dataset") parser.add_argument( "--model-type", type=int, default=self.model_type, help="model type (1=SimplePose, 2=AlphaPose)") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. 
""" super(CocoHpe1MetaInfo, self).update(args) self.input_image_size = args.input_size self.mean_rgb = args.mean_rgb self.std_rgb = args.std_rgb self.model_type = args.model_type if self.model_type == 1: self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\ lambda x, y: recalc_pose1b(x, y, self.input_image_size) self.val_transform2 = CocoHpeValTransform1 self.test_transform2 = CocoHpeValTransform1 else: self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\ lambda x, y: recalc_pose2b(x, y, self.input_image_size) self.val_transform2 = CocoHpeValTransform2 self.test_transform2 = CocoHpeValTransform2 def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- args : obj A dataset class instance. """ self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path # --------------------------------------------------------------------------------------------------------------------- class CocoHpeDirectoryIterator(DirectoryIterator): allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None} def __init__(self, directory, image_data_generator, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dtype='float32', dataset=None): super(CocoHpeDirectoryIterator, self).set_processing_attrs( image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation) self.dataset = dataset self.class_mode = class_mode self.dtype = dtype self.n = len(self.dataset) self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() self.index_array = None self.index_generator = self._flow_index() def 
_get_batches_of_transformed_samples(self, index_array): """Gets a batch of transformed samples. # Arguments index_array: Array of sample indices to include in batch. # Returns: A batch of transformed samples. """ batch_x = None batch_y = None for i, j in enumerate(index_array): x, y = self.dataset[j] if batch_x is None: batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype) batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32) batch_x[i] = x batch_y[i] = y return batch_x, batch_y class CocoHpeImageDataGenerator(ImageDataGenerator): def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dataset=None): return CocoHpeDirectoryIterator( directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, dataset=dataset) def cocohpe_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = CocoHpeImageDataGenerator( preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)), data_format=data_format) return data_generator def cocohpe_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. 
ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="val", transform=ds_metainfo.val_transform2( ds_metainfo=ds_metainfo))) return generator def cocohpe_test_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for testing subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="test", transform=ds_metainfo.test_transform2( ds_metainfo=ds_metainfo))) return generator
37,195
33.282028
119
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/ade20k_seg_dataset.py
"""
    ADE20K semantic segmentation dataset.
"""

import os
import numpy as np
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo


class ADE20KSegDataset(SegDataset):
    """
    ADE20K semantic segmentation dataset.

    Parameters:
    ----------
    root : str
        Path to a folder with `ADEChallengeData2016` subfolder.
    mode : str, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    # 150 semantic classes; raw label 0 ('ambiguous') is remapped onto the
    # vague index by `_mask_transform` below.
    classes = 150
    vague_idx = 150
    use_vague = True
    background_idx = -1
    ignore_bg = False

    def __init__(self,
                 root,
                 mode="train",
                 transform=None,
                 **kwargs):
        super(ADE20KSegDataset, self).__init__(
            root=root,
            mode=mode,
            transform=transform,
            **kwargs)

        base_dir_path = os.path.join(root, "ADEChallengeData2016")
        assert os.path.exists(base_dir_path), "Please prepare dataset"

        subset_dir_name = "training" if mode == "train" else "validation"
        image_dir_path = os.path.join(base_dir_path, "images", subset_dir_name)
        mask_dir_path = os.path.join(base_dir_path, "annotations", subset_dir_name)

        # Collect (image, mask) file pairs; images without a matching mask are
        # reported and skipped.
        self.images = []
        self.masks = []
        for fname in os.listdir(image_dir_path):
            if not fname.endswith(".jpg"):
                continue
            stem = os.path.splitext(fname)[0]
            mask_file_path = os.path.join(mask_dir_path, stem + ".png")
            if not os.path.isfile(mask_file_path):
                print("Cannot find the mask: {}".format(mask_file_path))
                continue
            self.images.append(os.path.join(image_dir_path, fname))
            self.masks.append(mask_file_path)

        assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: {}\n".format(base_dir_path))

    def __getitem__(self, index):
        image = Image.open(self.images[index]).convert("RGB")

        # Demo mode returns the (transformed) image together with its file name
        # and never touches the mask.
        if self.mode == "demo":
            image = self._img_transform(image)
            if self.transform is not None:
                image = self.transform(image)
            return image, os.path.basename(self.images[index])

        mask = Image.open(self.masks[index])
        if self.mode == "train":
            image, mask = self._sync_transform(image, mask)
        elif self.mode == "val":
            image, mask = self._val_sync_transform(image, mask)
        else:
            assert (self.mode == "test")
            image = self._img_transform(image)
            mask = self._mask_transform(mask)

        if self.transform is not None:
            image = self.transform(image)
        return image, mask

    def __len__(self):
        return len(self.images)

    @staticmethod
    def _mask_transform(mask):
        # Raw label 0 marks ambiguous pixels: map it to vague_idx + 1, then
        # shift everything down by one so classes occupy 0..149 and the vague
        # label becomes 150.
        mask_np = np.array(mask).astype(np.int32)
        mask_np[mask_np == 0] = ADE20KSegDataset.vague_idx + 1
        mask_np -= 1
        return mask_np


class ADE20KMetaInfo(VOCMetaInfo):
    """
    Descriptor of the ADE20K dataset (metrics configured for 150 classes with
    a vague index and no background class).
    """
    def __init__(self):
        super(ADE20KMetaInfo, self).__init__()
        self.label = "ADE20K"
        self.short_label = "voc"
        self.root_dir_name = "ade20k"
        self.dataset_class = ADE20KSegDataset
        self.num_classes = ADE20KSegDataset.classes
        self.test_metric_extra_kwargs = [
            {"vague_idx": ADE20KSegDataset.vague_idx,
             "use_vague": ADE20KSegDataset.use_vague,
             "macro_average": False},
            {"num_classes": ADE20KSegDataset.classes,
             "vague_idx": ADE20KSegDataset.vague_idx,
             "use_vague": ADE20KSegDataset.use_vague,
             "bg_idx": ADE20KSegDataset.background_idx,
             "ignore_bg": ADE20KSegDataset.ignore_bg,
             "macro_average": False}]
4,291
33.894309
93
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/dataset_metainfo.py
"""
    Base dataset metainfo class.
"""

import os


class DatasetMetaInfo(object):
    """
    Base descriptor of a dataset: holds identification, layout, metric and
    training-configuration attributes that concrete dataset descriptors
    override.
    """
    def __init__(self):
        # Attributes that start out unset; concrete subclasses fill them in.
        unset_attrs = (
            "label", "root_dir_name", "root_dir_path", "dataset_class",
            "dataset_class_extra_kwargs", "num_training_samples", "in_channels",
            "num_classes", "input_image_size", "train_metric_capts",
            "train_metric_names", "train_metric_extra_kwargs",
            "val_metric_capts", "val_metric_names", "val_metric_extra_kwargs",
            "test_metric_capts", "test_metric_names",
            "test_metric_extra_kwargs", "saver_acc_ind", "ml_type",
            "train_net_extra_kwargs", "test_net_extra_kwargs")
        for attr_name in unset_attrs:
            setattr(self, attr_name, None)
        # Boolean flags with concrete defaults.
        self.use_imgrec = False
        self.train_use_weighted_sampler = False
        self.allow_hybridize = True
        self.load_ignore_extra = False

    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for dataset specific metainfo).

        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        parser.add_argument(
            "--data-dir",
            type=str,
            default=os.path.join(work_dir_path, self.root_dir_name),
            help="path to directory with {} dataset".format(self.label))
        parser.add_argument(
            "--num-classes",
            type=int,
            default=self.num_classes,
            help="number of classes")
        parser.add_argument(
            "--in-channels",
            type=int,
            default=self.in_channels,
            help="number of input channels")

    def update(self, args):
        """
        Update dataset metainfo after user customizing.

        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        self.root_dir_path = args.data_dir
        self.num_classes = args.num_classes
        self.in_channels = args.in_channels

    def update_from_dataset(self, dataset):
        """
        Update dataset metainfo after a dataset class instance creation.

        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        pass
2,733
27.778947
72
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/seg_dataset.py
import random
import threading

import numpy as np
from PIL import Image, ImageOps, ImageFilter
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator


class SegDataset(object):
    """
    Segmentation base dataset.

    Parameters:
    ----------
    root : str
        Path to data folder.
    mode : str
        'train', 'val', 'test', or 'demo'.
    transform : callable
        A function that transforms the image.
    base_size : int, default 520
        Base image size used for random rescaling in training mode.
    crop_size : int, default 480
        Output crop size.
    """
    def __init__(self,
                 root,
                 mode,
                 transform,
                 base_size=520,
                 crop_size=480):
        super(SegDataset, self).__init__()
        assert (mode in ("train", "val", "test", "demo"))
        # BUGFIX: a second `assert (mode in ("test", "demo"))` immediately
        # followed the assert above, which made 'train'/'val' modes raise and
        # left `_sync_transform`/`_val_sync_transform` below (and the
        # train/val branches of subclasses' `__getitem__`) unreachable.
        self.root = root
        self.mode = mode
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size

    def _val_sync_transform(self, image, mask):
        """
        Deterministic validation transform: resize the short edge to
        `crop_size`, then center-crop image and mask together.

        Parameters:
        ----------
        image : PIL.Image
            Input image.
        mask : PIL.Image
            Segmentation mask aligned with `image`.

        Returns:
        -------
        tuple of 2 np.array
            Transformed image and mask.
        """
        outsize = self.crop_size
        short_size = outsize
        w, h = image.size
        # resize so that the shorter side equals `short_size`
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = image.size
        x1 = int(round(0.5 * (w - outsize)))
        y1 = int(round(0.5 * (h - outsize)))
        image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    def _sync_transform(self, image, mask):
        """
        Random training transform applied jointly to image and mask:
        horizontal flip, random scale of the short edge, padding up to the
        crop size, random crop, and (image only) occasional Gaussian blur.

        Parameters:
        ----------
        image : PIL.Image
            Input image.
        mask : PIL.Image
            Segmentation mask aligned with `image`.

        Returns:
        -------
        tuple of 2 np.array
            Transformed image and mask.
        """
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop (mask padded with 0, i.e. the raw 'ambiguous' label)
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    @staticmethod
    def _img_transform(image):
        """
        Convert a PIL image to a numpy array.
        """
        return np.array(image)

    @staticmethod
    def _mask_transform(mask):
        """
        Convert a PIL mask to an int32 numpy array.
        """
        return np.array(mask).astype(np.int32)

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError


class SegDirectoryIterator(DirectoryIterator):
    """
    Keras-style iterator that draws (image, mask) pairs from a `SegDataset`
    instead of scanning a directory tree.

    NOTE: `__init__` deliberately skips `DirectoryIterator.__init__` (which
    would scan `directory` for class subfolders) and only calls
    `set_processing_attrs`, then replicates the `Iterator` bookkeeping fields.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}

    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        super(SegDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()

    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        # Arguments
            index_array: Array of sample indices to include in batch.

        # Returns:
            A batch of transformed samples (images, masks).
        """
        # Batch shapes come from the first sample, since the dataset decides
        # the output image/mask geometry.
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.int32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y


class SegImageDataGenerator(ImageDataGenerator):
    """
    ImageDataGenerator whose `flow_from_directory` yields a
    `SegDirectoryIterator` backed by an explicit `dataset` object.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        return SegDirectoryIterator(
            directory,
            self,
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            dataset=dataset)
7,631
33.378378
89
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/coco_hpe2_dataset.py
""" COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose). """ import os import json import math import threading import cv2 from operator import itemgetter import numpy as np from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator from .dataset_metainfo import DatasetMetaInfo class CocoHpe2Dataset(object): """ COCO keypoint detection (2D multiple human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None): super(CocoHpe2Dataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self.transform = transform mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") with open(annotations_file_path, "r") as f: self.file_names = json.load(f)["images"] self.image_dir_path = os.path.join(root, mode_name + "2017") self.annotations_file_path = annotations_file_path def __str__(self): return self.__class__.__name__ + "(" + self._root + ")" def __len__(self): return len(self.file_names) def __getitem__(self, idx): file_name = self.file_names[idx]["file_name"] image_file_path = os.path.join(self.image_dir_path, file_name) image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR) # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) img_mean = (128, 128, 128) img_scale = 1.0 / 256 base_height = 368 stride = 8 pad_value = (0, 0, 0) height, width, _ = image.shape image = self.normalize(image, img_mean, img_scale) ratio = base_height / float(image.shape[0]) image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC) min_dims = [base_height, 
max(image.shape[1], base_height)] image, pad = self.pad_width( image, stride, pad_value, min_dims) image = image.astype(np.float32) # image = image.transpose((2, 0, 1)) # image = torch.from_numpy(image) # if self.transform is not None: # image = self.transform(image) image_id = int(os.path.splitext(os.path.basename(file_name))[0]) label = np.array([image_id, 1.0] + pad + [height, width], np.float32) # label = torch.from_numpy(label) return image, label @staticmethod def normalize(img, img_mean, img_scale): img = np.array(img, dtype=np.float32) img = (img - img_mean) * img_scale return img @staticmethod def pad_width(img, stride, pad_value, min_dims): h, w, _ = img.shape h = min(min_dims[0], h) min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride min_dims[1] = max(min_dims[1], w) min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride top = int(math.floor((min_dims[0] - h) / 2.0)) left = int(math.floor((min_dims[1] - w) / 2.0)) bottom = int(min_dims[0] - h - top) right = int(min_dims[1] - w - left) pad = [top, left, bottom, right] padded_img = cv2.copyMakeBorder( src=img, top=top, bottom=bottom, left=left, right=right, borderType=cv2.BORDER_CONSTANT, value=pad_value) return padded_img, pad # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2ValTransform(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo def __call__(self, src, label): return src, label def extract_keypoints(heatmap, all_keypoints, total_keypoint_num): heatmap[heatmap < 0.1] = 0 heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode="constant") heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 1:heatmap_with_borders.shape[1] - 1] heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 2:heatmap_with_borders.shape[1]] heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 0:heatmap_with_borders.shape[1] - 2] 
heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1] - 1] heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0] - 2, 1:heatmap_with_borders.shape[1] - 1] heatmap_peaks = (heatmap_center > heatmap_left) &\ (heatmap_center > heatmap_right) &\ (heatmap_center > heatmap_up) &\ (heatmap_center > heatmap_down) heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0] - 1, 1:heatmap_center.shape[1] - 1] keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h) keypoints = sorted(keypoints, key=itemgetter(0)) suppressed = np.zeros(len(keypoints), np.uint8) keypoints_with_score_and_id = [] keypoint_num = 0 for i in range(len(keypoints)): if suppressed[i]: continue for j in range(i + 1, len(keypoints)): if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 + (keypoints[i][1] - keypoints[j][1]) ** 2) < 6: suppressed[j] = 1 keypoint_with_score_and_id = ( keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]], total_keypoint_num + keypoint_num) keypoints_with_score_and_id.append(keypoint_with_score_and_id) keypoint_num += 1 all_keypoints.append(keypoints_with_score_and_id) return keypoint_num def group_keypoints(all_keypoints_by_type, pafs, pose_entry_size=20, min_paf_score=0.05): def linspace2d(start, stop, n=10): points = 1 / (n - 1) * (stop - start) return points[:, None] * np.arange(n) + start[:, None] BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]] BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]) pose_entries = [] all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist]) for part_id in range(len(BODY_PARTS_PAF_IDS)): part_pafs = pafs[:, :, 
BODY_PARTS_PAF_IDS[part_id]] kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]] kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]] num_kpts_a = len(kpts_a) num_kpts_b = len(kpts_b) kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part continue elif num_kpts_a == 0: # body part has just 'b' keypoints for i in range(num_kpts_b): num = 0 for j in range(len(pose_entries)): # check if already in some pose, was added by another body part if pose_entries[j][kpt_b_id] == kpts_b[i][3]: num += 1 continue if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx pose_entry[-1] = 1 # num keypoints in pose pose_entry[-2] = kpts_b[i][2] # pose score pose_entries.append(pose_entry) continue elif num_kpts_b == 0: # body part has just 'a' keypoints for i in range(num_kpts_a): num = 0 for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == kpts_a[i][3]: num += 1 continue if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_a_id] = kpts_a[i][3] pose_entry[-1] = 1 pose_entry[-2] = kpts_a[i][2] pose_entries.append(pose_entry) continue connections = [] for i in range(num_kpts_a): kpt_a = np.array(kpts_a[i][0:2]) for j in range(num_kpts_b): kpt_b = np.array(kpts_b[j][0:2]) mid_point = [(), ()] mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)), int(round((kpt_a[1] + kpt_b[1]) * 0.5))) mid_point[1] = mid_point[0] vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]] vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2) if vec_norm == 0: continue vec[0] /= vec_norm vec[1] /= vec_norm cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] + vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1]) height_n = pafs.shape[0] // 2 success_ratio = 0 point_num = 10 # number of points to integration over paf if cur_point_score > -100: passed_point_score = 0 passed_point_num = 0 x, y 
= linspace2d(kpt_a, kpt_b) for point_idx in range(point_num): px = int(round(x[point_idx])) py = int(round(y[point_idx])) paf = part_pafs[py, px, 0:2] cur_point_score = vec[0] * paf[0] + vec[1] * paf[1] if cur_point_score > min_paf_score: passed_point_score += cur_point_score passed_point_num += 1 success_ratio = passed_point_num / point_num ratio = 0 if passed_point_num > 0: ratio = passed_point_score / passed_point_num ratio += min(height_n / vec_norm - 1, 0) if ratio > 0 and success_ratio > 0.8: score_all = ratio + kpts_a[i][2] + kpts_b[j][2] connections.append([i, j, ratio, score_all]) if len(connections) > 0: connections = sorted(connections, key=itemgetter(2), reverse=True) num_connections = min(num_kpts_a, num_kpts_b) has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32) has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32) filtered_connections = [] for row in range(len(connections)): if len(filtered_connections) == num_connections: break i, j, cur_point_score = connections[row][0:3] if not has_kpt_a[i] and not has_kpt_b[j]: filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score]) has_kpt_a[i] = 1 has_kpt_b[j] = 1 connections = filtered_connections if len(connections) == 0: continue if part_id == 0: pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))] for i in range(len(connections)): pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0] pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1] pose_entries[i][-1] = 2 pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2] elif part_id == 17 or part_id == 18: kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] for i in range(len(connections)): for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1: pose_entries[j][kpt_b_id] = connections[i][1] elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1: 
pose_entries[j][kpt_a_id] = connections[i][0] continue else: kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] for i in range(len(connections)): num = 0 for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == connections[i][0]: pose_entries[j][kpt_b_id] = connections[i][1] num += 1 pose_entries[j][-1] += 1 pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2] if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_a_id] = connections[i][0] pose_entry[kpt_b_id] = connections[i][1] pose_entry[-1] = 2 pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2] pose_entries.append(pose_entry) filtered_entries = [] for i in range(len(pose_entries)): if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2): continue filtered_entries.append(pose_entries[i]) pose_entries = np.asarray(filtered_entries) return pose_entries, all_keypoints def convert_to_coco_format(pose_entries, all_keypoints): coco_keypoints = [] scores = [] for n in range(len(pose_entries)): if len(pose_entries[n]) == 0: continue keypoints = [0] * 17 * 3 to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3] person_score = pose_entries[n][-2] position_id = -1 for keypoint_id in pose_entries[n][:-2]: position_id += 1 if position_id == 1: # no 'neck' in COCO continue cx, cy, score, visibility = 0, 0, 0, 0 # keypoint not found if keypoint_id != -1: cx, cy, score = all_keypoints[int(keypoint_id), 0:3] cx = cx + 0.5 cy = cy + 0.5 visibility = 1 keypoints[to_coco_map[position_id] * 3 + 0] = cx keypoints[to_coco_map[position_id] * 3 + 1] = cy keypoints[to_coco_map[position_id] * 3 + 2] = visibility coco_keypoints.append(keypoints) scores.append(person_score * max(0, (pose_entries[n][-1] - 1))) # -1 for 'neck' return coco_keypoints, scores def recalc_pose(pred, label): label_img_id = label[:, 0].astype(np.int32) # label_score = label[:, 1] pred = pred.transpose((0, 
3, 1, 2)) pads = label[:, 2:6].astype(np.int32) heights = label[:, 6].astype(np.int32) widths = label[:, 7].astype(np.int32) keypoints = 19 stride = 8 heatmap2ds = pred[:, :keypoints] paf2ds = pred[:, keypoints:(3 * keypoints)] pred_pts_score = [] pred_person_score = [] label_img_id_ = [] batch = pred.shape[0] for batch_i in range(batch): label_img_id_i = label_img_id[batch_i] pad = list(pads[batch_i]) height = int(heights[batch_i]) width = int(widths[batch_i]) heatmap2d = heatmap2ds[batch_i] paf2d = paf2ds[batch_i] heatmaps = np.transpose(heatmap2d, (1, 2, 0)) heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :] heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC) pafs = np.transpose(paf2d, (1, 2, 0)) pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :] pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC) total_keypoints_num = 0 all_keypoints_by_type = [] for kpt_idx in range(18): # 19th for bg total_keypoints_num += extract_keypoints( heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num) pose_entries, all_keypoints = group_keypoints( all_keypoints_by_type, pafs) coco_keypoints, scores = convert_to_coco_format( pose_entries, all_keypoints) pred_pts_score.append(coco_keypoints) pred_person_score.append(scores) label_img_id_.append([label_img_id_i] * len(scores)) return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0]) # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe2MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" 
self.dataset_class = CocoHpe2Dataset self.num_training_samples = None self.in_channels = 3 self.num_classes = 17 self.input_image_size = (368, 368) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.CocoOksAp"] self.test_metric_names = ["CocoHpeOksApMetric"] self.test_metric_extra_kwargs = [ {"name": "OksAp", "coco_annotations_file_path": None, "use_file": False, "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}] self.saver_acc_ind = 0 self.do_transform = True self.test_transform = cocohpe_val_transform self.test_transform2 = CocoHpe2ValTransform self.test_generator = cocohpe_test_generator self.ml_type = "hpe" self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.load_ignore_extra = False def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") parser.add_argument( "--load-ignore-extra", action="store_true", help="ignore extra layers in the source PyTroch model") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CocoHpe2MetaInfo, self).update(args) self.input_image_size = args.input_size self.load_ignore_extra = args.load_ignore_extra def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- args : obj A dataset class instance. 
""" self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path # --------------------------------------------------------------------------------------------------------------------- class CocoHpeDirectoryIterator(DirectoryIterator): allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None} def __init__(self, directory, image_data_generator, target_size=(368, 368), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dtype='float32', dataset=None): super(CocoHpeDirectoryIterator, self).set_processing_attrs( image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation) self.dataset = dataset self.class_mode = class_mode self.dtype = dtype self.n = len(self.dataset) self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() self.index_array = None self.index_generator = self._flow_index() def _get_batches_of_transformed_samples(self, index_array): """Gets a batch of transformed samples. # Arguments index_array: Array of sample indices to include in batch. # Returns: A batch of transformed samples. 
""" batch_x = None batch_y = None for i, j in enumerate(index_array): x, y = self.dataset[j] if batch_x is None: batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype) batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32) batch_x[i] = x batch_y[i] = y return batch_x, batch_y class CocoHpeImageDataGenerator(ImageDataGenerator): def flow_from_directory(self, directory, target_size=(368, 368), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dataset=None): return CocoHpeDirectoryIterator( directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, dataset=dataset) def cocohpe_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = CocoHpeImageDataGenerator( preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)), data_format=data_format) return data_generator def cocohpe_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. 
""" split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="val", transform=ds_metainfo.val_transform2( ds_metainfo=ds_metainfo))) return generator def cocohpe_test_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for testing subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="test", transform=ds_metainfo.test_transform2( ds_metainfo=ds_metainfo))) return generator
27,367
37.011111
119
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/svhn_cls_dataset.py
""" SVHN classification dataset. """ import os import hashlib import numpy as np from .cifar10_cls_dataset import CIFAR10MetaInfo def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True): """Download an given URL Parameters: ---------- url : str URL to download path : str, optional Destination path to store downloaded file. By default stores to the current directory with same name as in url. overwrite : bool, optional Whether to overwrite destination file if already exists. sha1_hash : str, optional Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified but doesn't match. retries : integer, default 5 The number of times to attempt the download in case of failure or non 200 return codes verify_ssl : bool, default True Verify SSL certificates. Returns: ------- str The file path of the downloaded file. """ import warnings try: import requests except ImportError: class requests_failed_to_import(object): pass requests = requests_failed_to_import if path is None: fname = url.split("/")[-1] # Empty filenames are invalid assert fname, "Can't construct file-name from this URL. Please set the `path` option manually." else: path = os.path.expanduser(path) if os.path.isdir(path): fname = os.path.join(path, url.split("/")[-1]) else: fname = path assert retries >= 0, "Number of retries should be at least 0" if not verify_ssl: warnings.warn( "Unverified HTTPS request is being made (verify_ssl=False). 
" "Adding certificate verification is strongly advised.") if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)): dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) if not os.path.exists(dirname): os.makedirs(dirname) while retries + 1 > 0: # Disable pyling too broad Exception # pylint: disable=W0703 try: print("Downloading {} from {}...".format(fname, url)) r = requests.get(url, stream=True, verify=verify_ssl) if r.status_code != 200: raise RuntimeError("Failed downloading url {}".format(url)) with open(fname, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) if sha1_hash and not _check_sha1(fname, sha1_hash): raise UserWarning("File {} is downloaded but the content hash does not match." " The repo may be outdated or download may be incomplete. " "If the 'repo_url' is overridden, consider switching to " "the default repo.".format(fname)) break except Exception as e: retries -= 1 if retries <= 0: raise e else: print("download failed, retrying, {} attempt{} left" .format(retries, "s" if retries > 1 else "")) return fname def _check_sha1(filename, sha1_hash): """Check whether the sha1 hash of the file content matches the expected hash. Parameters: ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns: ------- bool Whether the file content matches the expected hash. """ sha1 = hashlib.sha1() with open(filename, "rb") as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash def get_svhn_data(root, mode): """ SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/. Each sample is an image (in 3D NDArray) with shape (32, 32, 3). Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, we assign the label `0` to the digit `0`. 
Parameters: ---------- root : str Path to temp folder for storing data. mode : str 'train', 'val', or 'test'. """ _train_data = [("http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat", "e6588cae42a1a5ab5efe608cc5cd3fb9aaffd674")] _test_data = [("http://ufldl.stanford.edu/housenumbers/test_32x32.mat", "test_32x32.mat", "29b312382ca6b9fba48d41a7b5c19ad9a5462b20")] if any(not os.path.exists(path) or not _check_sha1(path, sha1) for path, sha1 in ((os.path.join(root, name), sha1) for _, name, sha1 in _train_data + _test_data)): for url, _, sha1 in _train_data + _test_data: _download(url=url, path=root, sha1_hash=sha1) if mode == "train": data_files = _train_data[0] else: data_files = _test_data[0] import scipy.io as sio loaded_mat = sio.loadmat(os.path.join(root, data_files[1])) data = loaded_mat["X"] data = np.transpose(data, (3, 0, 1, 2)) label = loaded_mat["y"].astype(np.int32).squeeze() np.place(label, label == 10, 0) return data, label class SVHNMetaInfo(CIFAR10MetaInfo): def __init__(self): super(SVHNMetaInfo, self).__init__() self.label = "SVHN" self.root_dir_name = "svhn" self.dataset_class = None self.num_training_samples = 73257 self.train_generator = svhn_train_generator self.val_generator = svhn_val_generator self.test_generator = svhn_val_generator def svhn_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) x_train, y_train = get_svhn_data( root=ds_metainfo.root_dir_path, mode="train") generator = data_generator.flow( x=x_train, y=y_train, batch_size=batch_size, shuffle=False) return generator def svhn_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. 
Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) x_test, y_test = get_svhn_data( root=ds_metainfo.root_dir_path, mode="val") generator = data_generator.flow( x=x_test, y=y_test, batch_size=batch_size, shuffle=False) return generator
7,496
30.902128
103
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/coco_hpe3_dataset.py
""" COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose). """ import os import threading import math import cv2 import numpy as np from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator from .dataset_metainfo import DatasetMetaInfo class CocoHpe3Dataset(object): """ COCO keypoint detection (2D multiple human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None): super(CocoHpe3Dataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self.transform = transform mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") # with open(annotations_file_path, "r") as f: # self.file_names = json.load(f)["images"] self.image_dir_path = os.path.join(root, mode_name + "2017") self.annotations_file_path = annotations_file_path from pycocotools.coco import COCO self.coco_gt = COCO(self.annotations_file_path) self.validation_ids = self.coco_gt.getImgIds()[:] def __str__(self): return self.__class__.__name__ + "(" + self._root + ")" def __len__(self): return len(self.validation_ids) def __getitem__(self, idx): # file_name = self.file_names[idx]["file_name"] image_id = self.validation_ids[idx] file_name = self.coco_gt.imgs[image_id]["file_name"] image_file_path = os.path.join(self.image_dir_path, file_name) image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR) # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) image_src_shape = image.shape[:2] boxsize = 512 max_downsample = 64 pad_value = 128 scale = boxsize / image.shape[0] if scale * image.shape[0] > 2600 or scale * image.shape[1] > 
3800: scale = min(2600 / image.shape[0], 3800 / image.shape[1]) image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) image, pad = self.pad_right_down_corner(image, max_downsample, pad_value) image = np.float32(image / 255) # image = image.transpose((2, 0, 1)) # image_id = int(os.path.splitext(os.path.basename(file_name))[0]) label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32) return image, label @staticmethod def pad_right_down_corner(img, stride, pad_value): h = img.shape[0] w = img.shape[1] pad = 4 * [None] pad[0] = 0 # up pad[1] = 0 # left pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right img_padded = img pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1)) img_padded = np.concatenate((pad_up, img_padded), axis=0) pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1)) img_padded = np.concatenate((pad_left, img_padded), axis=1) pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1)) img_padded = np.concatenate((img_padded, pad_down), axis=0) pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1)) img_padded = np.concatenate((img_padded, pad_right), axis=1) return img_padded, pad # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2ValTransform(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo def __call__(self, src, label): return src, label def recalc_pose(pred, label): dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15, 14: 2, 15: 1, 16: 4, 17: 3} parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye", "Rear", "Lear"] num_parts = len(parts) parts_dict = dict(zip(parts, range(num_parts))) limb_from = 
['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck', 'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho', 'Lhip', 'Rear', 'Lear', 'Rhip'] limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho', 'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip', 'Rkne', 'Rsho', 'Lsho', 'Lhip'] limb_from = [parts_dict[n] for n in limb_from] limb_to = [parts_dict[n] for n in limb_to] assert limb_from == [x for x in [ 1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]] assert limb_to == [x for x in [ 0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]] limbs_conn = list(zip(limb_from, limb_to)) limb_seq = limbs_conn paf_layers = 30 num_layers = 50 stride = 4 label_img_id = label[:, 0].astype(np.int32) # label_score = label[:, 1] pads = label[:, 2:6].astype(np.int32) image_src_shapes = label[:, 6:8].astype(np.int32) pred_pts_score = [] pred_person_score = [] label_img_id_ = [] batch = pred.shape[0] for batch_i in range(batch): label_img_id_i = label_img_id[batch_i] pad = list(pads[batch_i]) image_src_shape = list(image_src_shapes[batch_i]) # output_blob = pred[batch_i].transpose((1, 2, 0)) output_blob = pred[batch_i] output_paf = output_blob[:, :, :paf_layers] output_heatmap = output_blob[:, :, paf_layers:num_layers] heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) heatmap = heatmap[ pad[0]:(output_blob.shape[0] * stride - pad[2]), pad[1]:(output_blob.shape[1] * stride - pad[3]), :] heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC) paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) paf = paf[ pad[0]:(output_blob.shape[0] * stride - pad[2]), 
pad[1]:(output_blob.shape[1] * stride - pad[3]), :] paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC) all_peaks = find_peaks(heatmap) connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq) subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq) for s in subset[..., 0]: keypoint_indexes = s[:18] person_keypoint_coordinates = [] for index in keypoint_indexes: if index == -1: X, Y, C = 0, 0, 0 else: X, Y, C = list(candidate[index.astype(int)][:2]) + [1] person_keypoint_coordinates.append([X, Y, C]) person_keypoint_coordinates_coco = [None] * 17 for dt_index, gt_index in dt_gt_mapping.items(): if gt_index is None: continue person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index] pred_pts_score.append(person_keypoint_coordinates_coco) pred_person_score.append(1 - 1.0 / s[18]) label_img_id_.append(label_img_id_i) return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_) def find_peaks(heatmap_avg): import torch thre1 = 0.1 offset_radius = 2 all_peaks = [] peak_counter = 0 heatmap_avg = heatmap_avg.astype(np.float32) filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...] 
filter_map = torch.from_numpy(filter_map).cuda() filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1) filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0)) for part in range(18): map_ori = heatmap_avg[:, :, part] peaks_binary = filter_map[:, :, part] peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks] id = range(peak_counter, peak_counter + len(refined_peaks_with_score)) peaks_with_score_and_id = [refined_peaks_with_score[i] + (id[i],) for i in range(len(id))] all_peaks.append(peaks_with_score_and_id) peak_counter += len(peaks) return all_peaks def keypoint_heatmap_nms(heat, kernel=3, thre=0.1): from torch.nn import functional as F # keypoint NMS on heatmap (score map) pad = (kernel - 1) // 2 pad_heat = F.pad(heat, (pad, pad, pad, pad), mode="reflect") hmax = F.max_pool2d(pad_heat, (kernel, kernel), stride=1, padding=0) keep = (hmax == heat).float() * (heat >= thre).float() return heat * keep def refine_centroid(scorefmp, anchor, radius): """ Refine the centroid coordinate. It dose not affect the results after testing. 
:param scorefmp: 2-D numpy array, original regressed score map :param anchor: python tuple, (x,y) coordinates :param radius: int, range of considered scores :return: refined anchor, refined score """ x_c, y_c = anchor x_min = x_c - radius x_max = x_c + radius + 1 y_min = y_c - radius y_max = y_c + radius + 1 if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0: return anchor + (scorefmp[y_c, x_c], ) score_box = scorefmp[y_min:y_max, x_min:x_max] x_grid, y_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1] offset_x = (score_box * x_grid).sum() / score_box.sum() offset_y = (score_box * y_grid).sum() / score_box.sum() x_refine = x_c + offset_x y_refine = y_c + offset_y refined_anchor = (x_refine, y_refine) return refined_anchor + (score_box.mean(),) def find_connections(all_peaks, paf_avg, image_width, limb_seq): mid_num_ = 20 thre2 = 0.1 connect_ration = 0.8 connection_all = [] special_k = [] for k in range(len(limb_seq)): score_mid = paf_avg[:, :, k] candA = all_peaks[limb_seq[k][0]] candB = all_peaks[limb_seq[k][1]] nA = len(candA) nB = len(candB) if nA != 0 and nB != 0: connection_candidate = [] for i in range(nA): for j in range(nB): vec = np.subtract(candB[j][:2], candA[i][:2]) norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) mid_num = min(int(round(norm + 1)), mid_num_) if norm == 0: continue startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), np.linspace(candA[i][1], candB[j][1], num=mid_num))) limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for I in range(len(startend))]) score_midpts = limb_response score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0) criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts) criterion2 = score_with_dist_prior > 0 if criterion1 and criterion2: connection_candidate.append([ i, j, score_with_dist_prior, norm, 0.5 * score_with_dist_prior + 0.25 * 
candA[i][2] + 0.25 * candB[j][2]]) connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True) connection = np.zeros((0, 6)) for c in range(len(connection_candidate)): i, j, s, limb_len = connection_candidate[c][0:4] if i not in connection[:, 3] and j not in connection[:, 4]: connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]]) if len(connection) >= min(nA, nB): break connection_all.append(connection) else: special_k.append(k) connection_all.append([]) return connection_all, special_k def find_people(connection_all, special_k, all_peaks, limb_seq): len_rate = 16.0 connection_tole = 0.7 remove_recon = 0 subset = -1 * np.ones((0, 20, 2)) candidate = np.array([item for sublist in all_peaks for item in sublist]) for k in range(len(limb_seq)): if k not in special_k: partAs = connection_all[k][:, 0] partBs = connection_all[k][:, 1] indexA, indexB = np.array(limb_seq[k]) for i in range(len(connection_all[k])): found = 0 subset_idx = [-1, -1] for j in range(len(subset)): if subset[j][indexA][0].astype(int) == (partAs[i]).astype(int) or subset[j][indexB][0].astype( int) == partBs[i].astype(int): if found >= 2: continue subset_idx[found] = j found += 1 if found == 1: j = subset_idx[0] if subset[j][indexB][0].astype(int) == -1 and\ len_rate * subset[j][-1][1] > connection_all[k][i][-1]: subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-1][0] += 1 subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) elif subset[j][indexB][0].astype(int) != partBs[i].astype(int): if subset[j][indexB][1] >= connection_all[k][i][2]: pass else: if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]: continue subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1] subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-2][0] += 
candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\ subset[j][indexB][1] <= connection_all[k][i][2]: subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1] subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) else: pass elif found == 2: j1, j2 = subset_idx membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2] membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2] membership = membership1 + membership2 if len(np.nonzero(membership == 2)[0]) == 0: min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1]) min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1]) min_tolerance = min(min_limb1, min_limb2) if connection_all[k][i][2] < connection_tole * min_tolerance or\ len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]: continue subset[j1][:-2][...] += (subset[j2][:-2][...] 
+ 1) subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0] subset[j1][-2][0] += connection_all[k][i][2] subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1]) subset = np.delete(subset, j2, 0) else: if connection_all[k][i][0] in subset[j1, :-2, 0]: c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0]) c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1]) else: c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1]) c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0]) c1 = int(c1[0]) c2 = int(c2[0]) assert c1 != c2, "an candidate keypoint is used twice, shared by two people" if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]: continue small_j = j1 remove_c = c1 if subset[j1][c1][1] > subset[j2][c2][1]: small_j = j2 remove_c = c2 if remove_recon > 0: subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \ subset[small_j][remove_c][1] subset[small_j][remove_c][0] = -1 subset[small_j][remove_c][1] = -1 subset[small_j][-1][0] -= 1 elif not found and k < len(limb_seq): row = -1 * np.ones((20, 2)) row[indexA][0] = partAs[i] row[indexA][1] = connection_all[k][i][2] row[indexB][0] = partBs[i] row[indexB][1] = connection_all[k][i][2] row[-1][0] = 2 row[-1][1] = connection_all[k][i][-1] row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] row = row[np.newaxis, :, :] subset = np.concatenate((subset, row), axis=0) deleteIdx = [] for i in range(len(subset)): if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45: deleteIdx.append(i) subset = np.delete(subset, deleteIdx, axis=0) return subset, candidate # --------------------------------------------------------------------------------------------------------------------- class CocoHpe3MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe3MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = 
CocoHpe3Dataset self.num_training_samples = None self.in_channels = 3 self.num_classes = 17 self.input_image_size = (256, 256) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.CocoOksAp"] self.test_metric_names = ["CocoHpeOksApMetric"] self.test_metric_extra_kwargs = [ {"name": "OksAp", "coco_annotations_file_path": None, "validation_ids": None, "use_file": False, "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}] self.saver_acc_ind = 0 self.do_transform = True self.test_transform = cocohpe_val_transform self.test_transform2 = CocoHpe2ValTransform self.test_generator = cocohpe_test_generator self.ml_type = "hpe" self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.load_ignore_extra = False def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") parser.add_argument( "--load-ignore-extra", action="store_true", help="ignore extra layers in the source PyTroch model") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CocoHpe3MetaInfo, self).update(args) self.input_image_size = args.input_size self.load_ignore_extra = args.load_ignore_extra def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- args : obj A dataset class instance. 
""" self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path # self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids # --------------------------------------------------------------------------------------------------------------------- class CocoHpeDirectoryIterator(DirectoryIterator): allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None} def __init__(self, directory, image_data_generator, target_size=(368, 368), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dtype='float32', dataset=None): super(CocoHpeDirectoryIterator, self).set_processing_attrs( image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation) self.dataset = dataset self.class_mode = class_mode self.dtype = dtype self.n = len(self.dataset) self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() self.index_array = None self.index_generator = self._flow_index() def _get_batches_of_transformed_samples(self, index_array): """Gets a batch of transformed samples. # Arguments index_array: Array of sample indices to include in batch. # Returns: A batch of transformed samples. 
""" batch_x = None batch_y = None for i, j in enumerate(index_array): x, y = self.dataset[j] if batch_x is None: batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype) batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32) batch_x[i] = x batch_y[i] = y return batch_x, batch_y class CocoHpeImageDataGenerator(ImageDataGenerator): def flow_from_directory(self, directory, target_size=(368, 368), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dataset=None): return CocoHpeDirectoryIterator( directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, dataset=dataset) def cocohpe_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = CocoHpeImageDataGenerator( preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)), data_format=data_format) return data_generator def cocohpe_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. 
""" split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="val", transform=ds_metainfo.val_transform2( ds_metainfo=ds_metainfo))) return generator def cocohpe_test_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for testing subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="test", transform=ds_metainfo.test_transform2( ds_metainfo=ds_metainfo))) return generator
29,689
37.408797
120
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/cls_dataset.py
""" Classification dataset routines. """ __all__ = ['img_normalization'] import numpy as np def img_normalization(img, mean_rgb, std_rgb): """ Normalization as in the ImageNet-1K validation procedure. Parameters: ---------- img : np.array input image. mean_rgb : tuple of 3 float Mean of RGB channels in the dataset. std_rgb : tuple of 3 float STD of RGB channels in the dataset. Returns: ------- np.array Output image. """ # print(img.max()) mean_rgb = np.array(mean_rgb, np.float32) * 255.0 std_rgb = np.array(std_rgb, np.float32) * 255.0 img = (img - mean_rgb) / std_rgb return img
735
20.028571
61
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/cifar10_cls_dataset.py
""" CIFAR-10 classification dataset. """ from tensorflow.keras.datasets import cifar10 from tensorflow.keras.preprocessing.image import ImageDataGenerator from .dataset_metainfo import DatasetMetaInfo from .cls_dataset import img_normalization class CIFAR10MetaInfo(DatasetMetaInfo): def __init__(self): super(CIFAR10MetaInfo, self).__init__() self.label = "CIFAR10" self.short_label = "cifar" self.root_dir_name = "cifar10" self.dataset_class = None self.num_training_samples = 50000 self.in_channels = 3 self.num_classes = 10 self.input_image_size = (32, 32) self.train_metric_capts = ["Train.Err"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err"}] self.val_metric_capts = ["Val.Err"] self.val_metric_names = ["Top1Error"] self.val_metric_extra_kwargs = [{"name": "err"}] self.saver_acc_ind = 0 self.train_transform = cifar10_train_transform self.val_transform = cifar10_val_transform self.test_transform = cifar10_val_transform self.train_generator = cifar10_train_generator self.val_generator = cifar10_val_generator self.test_generator = cifar10_val_generator self.ml_type = "imgcls" self.mean_rgb = (0.4914, 0.4822, 0.4465) self.std_rgb = (0.2023, 0.1994, 0.2010) # self.interpolation_msg = "nearest" def cifar10_train_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for training subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. 
""" data_generator = ImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), shear_range=0.2, zoom_range=0.2, horizontal_flip=True, data_format=data_format) return data_generator def cifar10_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = ImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), data_format=data_format) return data_generator def cifar10_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) (x_train, y_train), _ = cifar10.load_data() generator = data_generator.flow( x=x_train, y=y_train, batch_size=batch_size, shuffle=False) return generator def cifar10_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) _, (x_test, y_test) = cifar10.load_data() generator = data_generator.flow( x=x_test, y=y_test, batch_size=batch_size, shuffle=False) return generator
4,434
27.798701
67
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/cub200_2011_cls_dataset.py
""" CUB-200-2011 classification dataset. """ import os import numpy as np import pandas as pd import threading from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator from .cls_dataset import img_normalization from .imagenet1k_cls_dataset import ImageNet1KMetaInfo class CUBDirectoryIterator(DirectoryIterator): allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None} def __init__(self, directory, image_data_generator, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dtype='float32', mode="val"): super(CUBDirectoryIterator, self).set_processing_attrs( image_data_generator, target_size, color_mode, data_format, save_to_dir, save_prefix, save_format, subset, interpolation) root_dir_path = os.path.expanduser(directory) assert os.path.exists(root_dir_path) images_file_name = "images.txt" images_file_path = os.path.join(root_dir_path, images_file_name) if not os.path.exists(images_file_path): raise Exception("Images file doesn't exist: {}".format(images_file_name)) class_file_name = "image_class_labels.txt" class_file_path = os.path.join(root_dir_path, class_file_name) if not os.path.exists(class_file_path): raise Exception("Image class file doesn't exist: {}".format(class_file_name)) split_file_name = "train_test_split.txt" split_file_path = os.path.join(root_dir_path, split_file_name) if not os.path.exists(split_file_path): raise Exception("Split file doesn't exist: {}".format(split_file_name)) images_df = pd.read_csv( images_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "image_path"], dtype={"image_id": np.int32, "image_path": np.unicode}) class_df = pd.read_csv( class_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "class_id"], dtype={"image_id": np.int32, 
"class_id": np.uint8}) split_df = pd.read_csv( split_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "split_flag"], dtype={"image_id": np.int32, "split_flag": np.uint8}) df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df") split_flag = 1 if mode == "train" else 0 subset_df = df[df.split_flag == split_flag] image_ids = subset_df["image_id"].values.astype(np.int32) class_ids = subset_df["class_id"].values.astype(np.int32) - 1 image_file_names = subset_df["image_path"].values.astype(np.unicode) images_dir_name = "images" self.images_dir_path = os.path.join(root_dir_path, images_dir_name) assert os.path.exists(self.images_dir_path) assert (len(image_ids) == len(class_ids)) self.class_mode = class_mode self.dtype = dtype self._filepaths = [os.path.join(self.images_dir_path, image_file_name) for image_file_name in image_file_names] self.classes = [int(class_id) for class_id in class_ids] self.n = len(class_ids) self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() self.index_array = None self.index_generator = self._flow_index() class CubImageDataGenerator(ImageDataGenerator): def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', mode="val"): return CUBDirectoryIterator( directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, mode=mode) class CUB200MetaInfo(ImageNet1KMetaInfo): def __init__(self): super(CUB200MetaInfo, self).__init__() 
self.label = "CUB200_2011" self.short_label = "cub" self.root_dir_name = "CUB_200_2011" self.dataset_class = None self.num_training_samples = None self.num_classes = 200 self.train_metric_capts = ["Train.Err"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err"}] self.val_metric_capts = ["Val.Err"] self.val_metric_names = ["Top1Error"] self.val_metric_extra_kwargs = [{"name": "err"}] self.saver_acc_ind = 0 self.train_transform = cub200_train_transform self.val_transform = cub200_val_transform self.test_transform = cub200_val_transform self.train_generator = cub200_train_generator self.val_generator = cub200_val_generator self.test_generator = cub200_val_generator self.net_extra_kwargs = {"aux": False} self.load_ignore_extra = True def add_dataset_parser_arguments(self, parser, work_dir_path): super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--no-aux", dest="no_aux", action="store_true", help="no `aux` mode in model") def update(self, args): super(CUB200MetaInfo, self).update(args) if args.no_aux: self.net_extra_kwargs = None self.load_ignore_extra = False def cub200_train_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for training subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo CUB-200-2011 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = CubImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), shear_range=0.2, zoom_range=0.2, horizontal_flip=True, data_format=data_format) return data_generator def cub200_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. 
Parameters: ---------- ds_metainfo : DatasetMetaInfo CUB-200-2011 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = CubImageDataGenerator( preprocessing_function=(lambda img: img_normalization( img=img, mean_rgb=ds_metainfo.mean_rgb, std_rgb=ds_metainfo.std_rgb)), data_format=data_format) return data_generator def cub200_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ root = ds_metainfo.root_dir_path generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation=ds_metainfo.interpolation_msg, mode="val") return generator def cub200_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ root = ds_metainfo.root_dir_path generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation=ds_metainfo.interpolation_msg, mode="val") return generator
10,350
32.070288
119
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/cityscapes_seg_dataset.py
""" Cityscapes semantic segmentation dataset. """ import os import numpy as np from PIL import Image from .seg_dataset import SegDataset from .voc_seg_dataset import VOCMetaInfo class CityscapesSegDataset(SegDataset): """ Cityscapes semantic segmentation dataset. Parameters: ---------- root : str Path to a folder with `leftImg8bit` and `gtFine` subfolders. mode : str, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None, **kwargs): super(CityscapesSegDataset, self).__init__( root=root, mode=mode, transform=transform, **kwargs) image_dir_path = os.path.join(root, "leftImg8bit") mask_dir_path = os.path.join(root, "gtFine") assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset" mode_dir_name = "train" if mode == "train" else "val" image_dir_path = os.path.join(image_dir_path, mode_dir_name) # mask_dir_path = os.path.join(mask_dir_path, mode_dir_name) self.images = [] self.masks = [] for image_subdir_path, _, image_file_names in os.walk(image_dir_path): for image_file_name in image_file_names: if image_file_name.endswith(".png"): image_file_path = os.path.join(image_subdir_path, image_file_name) mask_file_name = image_file_name.replace("leftImg8bit", "gtFine_labelIds") mask_subdir_path = image_subdir_path.replace("leftImg8bit", "gtFine") mask_file_path = os.path.join(mask_subdir_path, mask_file_name) if os.path.isfile(mask_file_path): self.images.append(image_file_path) self.masks.append(mask_file_path) else: print("Cannot find the mask: {}".format(mask_file_path)) assert (len(self.images) == len(self.masks)) if len(self.images) == 0: raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path)) def __getitem__(self, index): image = Image.open(self.images[index]).convert("RGB") if self.mode == "demo": image = self._img_transform(image) if self.transform is not None: image = 
self.transform(image) return image, os.path.basename(self.images[index]) mask = Image.open(self.masks[index]) if self.mode == "train": image, mask = self._sync_transform(image, mask) elif self.mode == "val": image, mask = self._val_sync_transform(image, mask) else: assert (self.mode == "test") image = self._img_transform(image) mask = self._mask_transform(mask) if self.transform is not None: image = self.transform(image) return image, mask classes = 19 vague_idx = 19 use_vague = True background_idx = -1 ignore_bg = False _key = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, 4, -1, -1, -1, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18]) _mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32) @staticmethod def _class_to_index(mask): values = np.unique(mask) for value in values: assert(value in CityscapesSegDataset._mapping) index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True) return CityscapesSegDataset._key[index].reshape(mask.shape) @staticmethod def _mask_transform(mask): np_mask = np.array(mask).astype(np.int32) np_mask = CityscapesSegDataset._class_to_index(np_mask) np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx return np_mask def __len__(self): return len(self.images) class CityscapesMetaInfo(VOCMetaInfo): def __init__(self): super(CityscapesMetaInfo, self).__init__() self.label = "Cityscapes" self.short_label = "voc" self.root_dir_name = "cityscapes" self.dataset_class = CityscapesSegDataset self.num_classes = CityscapesSegDataset.classes self.test_metric_extra_kwargs = [ {"vague_idx": CityscapesSegDataset.vague_idx, "use_vague": CityscapesSegDataset.use_vague, "macro_average": False}, {"num_classes": CityscapesSegDataset.classes, "vague_idx": CityscapesSegDataset.vague_idx, "use_vague": CityscapesSegDataset.use_vague, "bg_idx": CityscapesSegDataset.background_idx, "ignore_bg": CityscapesSegDataset.ignore_bg, "macro_average": False}] self.test_net_extra_kwargs = self.net_extra_kwargs
5,121
36.386861
105
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/coco_seg_dataset.py
""" COCO semantic segmentation dataset. """ import os import logging import numpy as np from PIL import Image from tqdm import trange from .seg_dataset import SegDataset from .voc_seg_dataset import VOCMetaInfo class CocoSegDataset(SegDataset): """ COCO semantic segmentation dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None, **kwargs): super(CocoSegDataset, self).__init__( root=root, mode=mode, transform=transform, **kwargs) mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + "2017.json") idx_file_path = os.path.join(annotations_dir_path, mode_name + "_idx.npy") self.image_dir_path = os.path.join(root, mode_name + "2017") from pycocotools.coco import COCO from pycocotools import mask as coco_mask self.coco = COCO(annotations_file_path) self.coco_mask = coco_mask if os.path.exists(idx_file_path): self.idx = np.load(idx_file_path) else: idx_list = list(self.coco.imgs.keys()) self.idx = self._filter_idx(idx_list, idx_file_path) self.transform = transform def __getitem__(self, index): image_idx = int(self.idx[index]) img_metadata = self.coco.loadImgs(image_idx)[0] image_file_name = img_metadata["file_name"] image_file_path = os.path.join(self.image_dir_path, image_file_name) image = Image.open(image_file_path).convert("RGB") if self.mode == "demo": image = self._img_transform(image) if self.transform is not None: image = self.transform(image) return image, os.path.basename(image_file_path) coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=image_idx)) mask = Image.fromarray(self._gen_seg_mask( coco_target, img_metadata["height"], img_metadata["width"])) if self.mode == "train": 
image, mask = self._sync_transform(image, mask) elif self.mode == "val": image, mask = self._val_sync_transform(image, mask) else: assert (self.mode == "test") image, mask = self._img_transform(image), self._mask_transform(mask) if self.transform is not None: image = self.transform(image) return image, mask def _gen_seg_mask(self, target, h, w): cat_list = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72] mask = np.zeros((h, w), dtype=np.uint8) for instance in target: rle = self.coco_mask.frPyObjects(instance["segmentation"], h, w) m = self.coco_mask.decode(rle) cat = instance["category_id"] if cat in cat_list: c = cat_list.index(cat) else: continue if len(m.shape) < 3: mask[:, :] += (mask == 0) * (m * c) else: mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8) return mask def _filter_idx(self, idx, idx_file, pixels_thr=1000): logging.info("Filtering mask index") tbar = trange(len(idx)) filtered_idx = [] for i in tbar: img_id = idx[i] coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id)) img_metadata = self.coco.loadImgs(img_id)[0] mask = self._gen_seg_mask( coco_target, img_metadata["height"], img_metadata["width"]) if (mask > 0).sum() > pixels_thr: filtered_idx.append(img_id) tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx))) logging.info("Found number of qualified images: {}".format(len(filtered_idx))) np.save(idx_file, np.array(filtered_idx, np.int32)) return filtered_idx classes = 21 vague_idx = -1 use_vague = False background_idx = 0 ignore_bg = True @staticmethod def _mask_transform(mask): np_mask = np.array(mask).astype(np.int32) # print("min={}, max={}".format(np_mask.min(), np_mask.max())) return np_mask def __len__(self): return len(self.idx) class CocoSegMetaInfo(VOCMetaInfo): def __init__(self): super(CocoSegMetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = 
CocoSegDataset self.num_classes = CocoSegDataset.classes self.test_metric_extra_kwargs = [ {"vague_idx": CocoSegDataset.vague_idx, "use_vague": CocoSegDataset.use_vague, "macro_average": False}, {"num_classes": CocoSegDataset.classes, "vague_idx": CocoSegDataset.vague_idx, "use_vague": CocoSegDataset.use_vague, "bg_idx": CocoSegDataset.background_idx, "ignore_bg": CocoSegDataset.ignore_bg, "macro_average": False}]
5,682
34.298137
112
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/voc_seg_dataset.py
""" Pascal VOC2012 semantic segmentation dataset. """ import os import numpy as np from PIL import Image from chainer import get_dtype from .seg_dataset import SegDataset, SegImageDataGenerator from .dataset_metainfo import DatasetMetaInfo class VOCSegDataset(SegDataset): """ Pascal VOC2012 semantic segmentation dataset. Parameters: ---------- root : str Path to VOCdevkit folder. mode : str, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None, **kwargs): super(VOCSegDataset, self).__init__( root=root, mode=mode, transform=transform, **kwargs) base_dir_path = os.path.join(root, "VOC2012") image_dir_path = os.path.join(base_dir_path, "JPEGImages") mask_dir_path = os.path.join(base_dir_path, "SegmentationClass") splits_dir_path = os.path.join(base_dir_path, "ImageSets", "Segmentation") if mode == "train": split_file_path = os.path.join(splits_dir_path, "train.txt") elif mode in ("val", "test", "demo"): split_file_path = os.path.join(splits_dir_path, "val.txt") else: raise RuntimeError("Unknown dataset splitting mode") self.images = [] self.masks = [] with open(os.path.join(split_file_path), "r") as lines: for line in lines: image_file_path = os.path.join(image_dir_path, line.rstrip('\n') + ".jpg") assert os.path.isfile(image_file_path) self.images.append(image_file_path) mask_file_path = os.path.join(mask_dir_path, line.rstrip('\n') + ".png") assert os.path.isfile(mask_file_path) self.masks.append(mask_file_path) assert (len(self.images) == len(self.masks)) def __getitem__(self, index): image = Image.open(self.images[index]).convert("RGB") if self.mode == "demo": image = self._img_transform(image) if self.transform is not None: image = self.transform(image) return image, os.path.basename(self.images[index]) mask = Image.open(self.masks[index]) if self.mode == "train": image, mask = self._sync_transform(image, mask) elif self.mode == "val": 
image, mask = self._val_sync_transform(image, mask) else: assert self.mode == "test" image, mask = self._img_transform(image), self._mask_transform(mask) if self.transform is not None: image = self.transform(image) # print("---> image.shape={}".format(image.shape)) # print("---> mask.shape={}".format(mask.shape)) return image, mask classes = 21 vague_idx = 255 use_vague = True background_idx = 0 ignore_bg = True @staticmethod def _mask_transform(mask): np_mask = np.array(mask).astype(np.int32) # np_mask[np_mask == 255] = VOCSegDataset.vague_idx return np_mask def __len__(self): return len(self.images) class VOCSegTrainTransform(object): """ ImageNet-1K training transform. """ def __init__(self, ds_metainfo, mean_rgb=(0.485, 0.456, 0.406), std_rgb=(0.229, 0.224, 0.225)): assert (ds_metainfo is not None) self.mean = np.array(mean_rgb, np.float32)[np.newaxis, np.newaxis, :] self.std = np.array(std_rgb, np.float32)[np.newaxis, np.newaxis, :] def __call__(self, img): dtype = get_dtype(None) img = img.astype(dtype) img *= 1.0 / 255.0 img -= self.mean img /= self.std return img class VOCSegTestTransform(object): """ ImageNet-1K validation transform. 
""" def __init__(self, ds_metainfo, mean_rgb=(0.485, 0.456, 0.406), std_rgb=(0.229, 0.224, 0.225)): assert (ds_metainfo is not None) self.mean = np.array(mean_rgb, np.float32)[np.newaxis, np.newaxis, :] self.std = np.array(std_rgb, np.float32)[np.newaxis, np.newaxis, :] def __call__(self, img): dtype = get_dtype(None) img = img.astype(dtype) img *= 1.0 / 255.0 img -= self.mean img /= self.std return img class VOCMetaInfo(DatasetMetaInfo): def __init__(self): super(VOCMetaInfo, self).__init__() self.label = "VOC" self.short_label = "voc" self.root_dir_name = "voc" self.dataset_class = VOCSegDataset self.num_training_samples = None self.in_channels = 3 self.num_classes = VOCSegDataset.classes self.input_image_size = (480, 480) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_extra_kwargs = None self.test_metric_capts = ["Val.PixAcc", "Val.IoU"] self.test_metric_names = ["PixelAccuracyMetric", "MeanIoUMetric"] self.test_metric_extra_kwargs = [ {"vague_idx": VOCSegDataset.vague_idx, "use_vague": VOCSegDataset.use_vague, "macro_average": False}, {"num_classes": VOCSegDataset.classes, "vague_idx": VOCSegDataset.vague_idx, "use_vague": VOCSegDataset.use_vague, "bg_idx": VOCSegDataset.background_idx, "ignore_bg": VOCSegDataset.ignore_bg, "macro_average": False}] self.saver_acc_ind = 1 self.train_transform = voc_train_transform self.val_transform = voc_val_transform self.test_transform = voc_val_transform self.train_transform2 = VOCSegTrainTransform self.val_transform2 = VOCSegTestTransform self.test_transform2 = VOCSegTestTransform self.train_generator = voc_train_generator self.val_generator = voc_val_generator self.test_generator = voc_test_generator self.ml_type = "imgseg" self.allow_hybridize = False self.net_extra_kwargs = {"aux": False, "fixed_size": False} self.load_ignore_extra = True self.image_base_size = 520 self.image_crop_size = 480 def 
add_dataset_parser_arguments(self, parser, work_dir_path): super(VOCMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--image-base-size", type=int, default=520, help="base image size") parser.add_argument( "--image-crop-size", type=int, default=480, help="crop image size") def update(self, args): super(VOCMetaInfo, self).update(args) self.image_base_size = args.image_base_size self.image_crop_size = args.image_crop_size def voc_train_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for training subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = SegImageDataGenerator( preprocessing_function=(lambda img: VOCSegTrainTransform(ds_metainfo=ds_metainfo)(img)), data_format=data_format) return data_generator def voc_val_transform(ds_metainfo, data_format="channels_last"): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. data_format : str, default 'channels_last' The ordering of the dimensions in tensors. Returns: ------- ImageDataGenerator Image transform sequence. """ data_generator = SegImageDataGenerator( preprocessing_function=(lambda img: VOCSegTestTransform(ds_metainfo=ds_metainfo)(img)), data_format=data_format) return data_generator def voc_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. 
""" split = "train" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation=ds_metainfo.interpolation_msg, dataset=None) return generator def voc_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="val", transform=VOCSegTestTransform( ds_metainfo=ds_metainfo))) return generator def voc_test_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for testing subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo Pascal VOC2012 dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ split = "val" root = ds_metainfo.root_dir_path root = os.path.join(root, split) generator = data_generator.flow_from_directory( directory=root, target_size=ds_metainfo.input_image_size, class_mode="binary", batch_size=batch_size, shuffle=False, interpolation="bilinear", dataset=ds_metainfo.dataset_class( root=ds_metainfo.root_dir_path, mode="test", transform=VOCSegTestTransform( ds_metainfo=ds_metainfo))) return generator
11,479
30.195652
96
py
imgclsmob
imgclsmob-master/tensorflow2/datasets/cifar100_cls_dataset.py
""" CIFAR-100 classification dataset. """ from tensorflow.keras.datasets import cifar100 from .cifar10_cls_dataset import CIFAR10MetaInfo class CIFAR100MetaInfo(CIFAR10MetaInfo): def __init__(self): super(CIFAR100MetaInfo, self).__init__() self.label = "CIFAR100" self.root_dir_name = "cifar100" self.num_classes = 100 self.train_generator = cifar100_train_generator self.val_generator = cifar100_val_generator self.test_generator = cifar100_val_generator def cifar100_train_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for training subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) (x_train, y_train), _ = cifar100.load_data() generator = data_generator.flow( x=x_train, y=y_train, batch_size=batch_size, shuffle=False) return generator def cifar100_val_generator(data_generator, ds_metainfo, batch_size): """ Create image generator for validation subset. Parameters: ---------- data_generator : ImageDataGenerator Image transform sequence. ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. batch_size : int Batch size. Returns: ------- Sequential Image transform sequence. """ assert(ds_metainfo is not None) _, (x_test, y_test) = cifar100.load_data() generator = data_generator.flow( x=x_test, y=y_test, batch_size=batch_size, shuffle=False) return generator
1,963
24.179487
55
py
imgclsmob
imgclsmob-master/common/logger_utils.py
""" Routines for logging subsystem initialization. """ __all__ = ['initialize_logging'] import os import sys import logging from .env_stats import get_env_stats def prepare_logger(logging_dir_path, logging_file_name): """ Prepare logger. Parameters: ---------- logging_dir_path : str Path to logging directory. logging_file_name : str Name of logging file. Returns: ------- Logger Logger instance. bool If the logging file exist. """ logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) # sh = logging.StreamHandler() # logger.addHandler(sh) log_file_exist = False if logging_dir_path is not None and logging_dir_path: log_file_path = os.path.join(logging_dir_path, logging_file_name) if not os.path.exists(logging_dir_path): os.makedirs(logging_dir_path) log_file_exist = False else: log_file_exist = (os.path.exists(log_file_path) and os.path.getsize(log_file_path) > 0) fh = logging.FileHandler(log_file_path) logger.addHandler(fh) if log_file_exist: logging.info("--------------------------------") return logger, log_file_exist def initialize_logging(logging_dir_path, logging_file_name, script_args, log_packages, log_pip_packages): """ Initialize logging subsystem. Parameters: ---------- logging_dir_path : str Path to logging directory. logging_file_name : str Name of logging file. script_args : ArgumentParser Main script arguments. log_packages : bool Whether to log packages info. log_pip_packages : bool Whether to log pip-packages info. Returns: ------- Logger Logger instance. bool If the logging file exist. 
""" logger, log_file_exist = prepare_logger( logging_dir_path=logging_dir_path, logging_file_name=logging_file_name) logging.info("Script command line:\n{}".format(" ".join(sys.argv))) logging.info("Script arguments:\n{}".format(script_args)) packages = log_packages.replace(" ", "").split(",") if type(log_packages) == str else log_packages pip_packages = log_pip_packages.replace(" ", "").split(",") if type(log_pip_packages) == str else log_pip_packages if (log_packages is not None) and (log_pip_packages is not None): logging.info("Env_stats:\n{}".format(get_env_stats( packages=packages, pip_packages=pip_packages))) return logger, log_file_exist
2,723
28.608696
118
py
imgclsmob
imgclsmob-master/common/train_log_param_saver.py
import os import shutil class TrainLogParamSaver(object): """ Train logger does the following: 1. save several the last model checkpoints, for disaster recovery, 2. save several the best model checkpoints, to prevent overfitting, 3. save pure evaluation metric values to log-file for observer. Parameters: ---------- checkpoint_file_name_prefix : str prefix for checkpoint file name (without parent dir) last_checkpoint_file_name_suffix : str or None suffix for last checkpoint file name if None then checkpoint_file_name_prefix is not modified best_checkpoint_file_name_suffix : str or None suffix for best checkpoint file name last_checkpoint_dir_path : str directory path for saving the last checkpoint files best_checkpoint_dir_path : str or None directory name for saving the best checkpoint files if None then best_checkpoint_dir_path = last_checkpoint_dir_path last_checkpoint_file_count : int count of the last checkpoint files best_checkpoint_file_count : int count of the best checkpoint files checkpoint_file_save_callback : function or None Callback for real saving of checkpoint file checkpoint_file_exts : tuple of str List of checkpoint file extensions save_interval : int Interval of checkpoint file saving num_epochs : int Number of epochs for saving last checkpoint if save_interval > 1 bigger : list of bool Should be bigger for each value of evaluation metric values mask : list of bool or None evaluation metric values that should be taken into account score_log_file_path : str or None file path to score log file score_log_attempt_value : int number of current attempt (used for comparing training curves for various hyperparameters) best_map_log_file_path : str or None file path to best map log file """ def __init__(self, checkpoint_file_name_prefix="model", last_checkpoint_file_name_suffix="last", best_checkpoint_file_name_suffix=None, last_checkpoint_dir_path="", best_checkpoint_dir_path=None, last_checkpoint_file_count=2, best_checkpoint_file_count=2, 
checkpoint_file_save_callback=None, checkpoint_file_exts=(".params",), save_interval=1, num_epochs=-1, param_names=None, acc_ind=0, # bigger=[True], # mask=None, score_log_file_path=None, score_log_attempt_value=1, best_map_log_file_path=None): if not os.path.exists(last_checkpoint_dir_path): os.makedirs(last_checkpoint_dir_path) if best_checkpoint_dir_path is None: best_checkpoint_dir_path = last_checkpoint_dir_path assert ((last_checkpoint_file_name_suffix != best_checkpoint_file_name_suffix) and (not ((last_checkpoint_file_name_suffix is None) and (best_checkpoint_file_name_suffix is None)))) else: assert (last_checkpoint_dir_path != best_checkpoint_dir_path) if not os.path.exists(best_checkpoint_dir_path): os.makedirs(best_checkpoint_dir_path) self.last_checkpoints_prefix = self._create_checkpoint_file_path_full_prefix( checkpoint_dir_path=last_checkpoint_dir_path, checkpoint_file_name_prefix=checkpoint_file_name_prefix, checkpoint_file_name_suffix=last_checkpoint_file_name_suffix) self.best_checkpoints_prefix = self._create_checkpoint_file_path_full_prefix( checkpoint_dir_path=best_checkpoint_dir_path, checkpoint_file_name_prefix=checkpoint_file_name_prefix, checkpoint_file_name_suffix=best_checkpoint_file_name_suffix) assert (last_checkpoint_file_count >= 0) self.last_checkpoint_file_count = last_checkpoint_file_count assert (best_checkpoint_file_count >= 0) self.best_checkpoint_file_count = best_checkpoint_file_count self.checkpoint_file_save_callback = checkpoint_file_save_callback self.checkpoint_file_exts = checkpoint_file_exts assert (save_interval > 0) self.save_interval = save_interval assert (num_epochs > 0) self.num_epochs = num_epochs assert (type(param_names) == list) self.param_names = param_names assert (acc_ind >= 0) and (acc_ind < len(param_names)) self.acc_ind = acc_ind # assert isinstance(bigger, list) # self.bigger = np.array(bigger) # if mask is None: # self.mask = np.ones_like(self.bigger) # else: # assert isinstance(mask, list) # assert 
(len(mask) == len(bigger)) # self.mask = np.array(mask) if score_log_file_path is not None: self.score_log_file_exist = (os.path.exists(score_log_file_path) and os.path.getsize(score_log_file_path) > 0) self.score_log_file = open(score_log_file_path, "a") if not self.score_log_file_exist: titles = ["Attempt", "Epoch"] + self.param_names self.score_log_file.write("\t".join(titles)) self.score_log_file.flush() else: self.score_log_file = None self.score_log_attempt_value = score_log_attempt_value if best_map_log_file_path is not None: self.best_map_log_file_exist = (os.path.exists(best_map_log_file_path) and os.path.getsize(best_map_log_file_path) > 0) self.best_map_log_file = open(best_map_log_file_path, "a") if not self.best_map_log_file_exist: titles = ["Attempt", "Epoch", self.param_names[self.acc_ind]] self.best_map_log_file.write("\t".join(titles)) self.best_map_log_file.flush() else: self.best_map_log_file = None self.best_eval_metric_value = None self.best_eval_metric_epoch = None self.last_checkpoint_params_file_stems = [] self.best_checkpoint_params_file_stems = [] self.can_save = (self.checkpoint_file_save_callback is not None) def __del__(self): """ Releasing resources. 
""" if self.score_log_file is not None: self.score_log_file.close() if self.best_map_log_file is not None: self.best_map_log_file.close() def epoch_test_end_callback(self, epoch1, params, **kwargs): curr_acc = params[self.acc_ind] if self.can_save: last_checkpoint_params_file_stem = None if (epoch1 % self.save_interval == 0) or (epoch1 == self.num_epochs): last_checkpoint_params_file_stem = self._get_last_checkpoint_params_file_stem(epoch1, curr_acc) self.checkpoint_file_save_callback(last_checkpoint_params_file_stem, **kwargs) self.last_checkpoint_params_file_stems.append(last_checkpoint_params_file_stem) if len(self.last_checkpoint_params_file_stems) > self.last_checkpoint_file_count: removed_checkpoint_file_stem = self.last_checkpoint_params_file_stems[0] for ext in self.checkpoint_file_exts: removed_checkpoint_file_path = removed_checkpoint_file_stem + ext if os.path.exists(removed_checkpoint_file_path): os.remove(removed_checkpoint_file_path) del self.last_checkpoint_params_file_stems[0] if (self.best_eval_metric_value is None) or (curr_acc < self.best_eval_metric_value): self.best_eval_metric_value = curr_acc self.best_eval_metric_epoch = epoch1 best_checkpoint_params_file_stem = self._get_best_checkpoint_params_file_stem(epoch1, curr_acc) if last_checkpoint_params_file_stem is not None: for ext in self.checkpoint_file_exts: last_checkpoint_params_file_path = last_checkpoint_params_file_stem + ext best_checkpoint_params_file_path = best_checkpoint_params_file_stem + ext assert (os.path.exists(last_checkpoint_params_file_path)) shutil.copy( src=last_checkpoint_params_file_path, dst=best_checkpoint_params_file_path) else: self.checkpoint_file_save_callback(best_checkpoint_params_file_stem, **kwargs) self.best_checkpoint_params_file_stems.append(best_checkpoint_params_file_stem) if len(self.best_checkpoint_params_file_stems) > self.best_checkpoint_file_count: removed_checkpoint_file_stem = self.best_checkpoint_params_file_stems[0] for ext in 
self.checkpoint_file_exts: removed_checkpoint_file_path = removed_checkpoint_file_stem + ext if os.path.exists(removed_checkpoint_file_path): os.remove(removed_checkpoint_file_path) del self.best_checkpoint_params_file_stems[0] if self.best_map_log_file is not None: self.best_map_log_file.write('\n{:02d}\t{:04d}\t{:.4f}'.format( self.score_log_attempt_value, epoch1, curr_acc)) self.best_map_log_file.flush() if self.score_log_file is not None: score_log_file_row = "\n" + "\t".join([str(self.score_log_attempt_value), str(epoch1)] + list(map(lambda x: "{:.4f}".format(x), params))) self.score_log_file.write(score_log_file_row) self.score_log_file.flush() @staticmethod def _create_checkpoint_file_path_full_prefix(checkpoint_dir_path, checkpoint_file_name_prefix, checkpoint_file_name_suffix): checkpoint_file_name_full_prefix = checkpoint_file_name_prefix if checkpoint_file_name_suffix is not None: checkpoint_file_name_full_prefix += ("_" + checkpoint_file_name_suffix) return os.path.join( checkpoint_dir_path, checkpoint_file_name_full_prefix) @staticmethod def _get_checkpoint_params_file_stem(checkpoint_file_path_prefix, epoch, acc): return "{}_{:04d}_{:.4f}".format(checkpoint_file_path_prefix, epoch, acc) def _get_last_checkpoint_params_file_stem(self, epoch, acc): return self._get_checkpoint_params_file_stem(self.last_checkpoints_prefix, epoch, acc) def _get_best_checkpoint_params_file_stem(self, epoch, acc): return self._get_checkpoint_params_file_stem(self.best_checkpoints_prefix, epoch, acc)
11,279
47
111
py
imgclsmob
imgclsmob-master/common/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/common/env_stats.py
""" Routines for logging environment setting. """ __all__ = ['get_env_stats'] import os import sys import subprocess import platform import json def get_pip_versions(package_list, python_version=""): """ Get packages information by using 'pip show' command. Parameters: ---------- package_list : list of str List of package names. python_version : str, default '' Python version ('2', '3', '') appended to 'pip' command. Returns: ------- dict Dictionary with module descriptions. """ module_versions = {} for module in package_list: try: out_bytes = subprocess.check_output([ "pip{0}".format(python_version), "show", module]) out_text = out_bytes.decode("utf-8").strip() except (subprocess.CalledProcessError, OSError): out_text = None module_versions[module] = out_text return module_versions def get_package_versions(package_list): """ Get packages information by inspecting __version__ attribute. Parameters: ---------- package_list : list of str List of package names. Returns: ------- dict Dictionary with module descriptions. """ module_versions = {} for module in package_list: try: module_versions[module] = __import__(module).__version__ except ImportError: module_versions[module] = None except AttributeError: module_versions[module] = "unknown" return module_versions def get_pyenv_info(packages, pip_packages, python_ver, pwd, git, sys_info=True): """ Get all available information about Python environment: packages information, Python version, current path, git revision. Parameters: ---------- packages : list of str list of package names to inspect only __version__. pip_packages : list of str List of package names to inspect by 'pip show'. python_ver : bool Whether to show python version. pwd : bool Whether to show pwd. git : bool Whether to show git info. sys_info : bool, default True Whether to show platform info. Returns: ------- dict Dictionary with module descriptions. 
""" pyenv_info = {} python_version = sys.version_info[0] # get versions from __version__ string modules_versions = get_package_versions(packages) pyenv_info.update(modules_versions) # get versions from pip if type(pip_packages) == list and len(pip_packages) > 0 and pip_packages[0]: modules_versions_pip = get_pip_versions(pip_packages, python_version) pyenv_info.update(modules_versions_pip) if python_ver: # set python version try: pyenv_info["python"] = "{0}.{1}.{2}".format(*sys.version_info[0:3]) except BaseException: pyenv_info["python"] = "unknown" if pwd: # set current path pyenv_info["pwd"] = os.path.dirname(os.path.realpath(__file__)) if git: # set git revision of the code try: if os.name == "nt": command = "cmd /V /C \"cd {} && git log -n 1\"".format(pyenv_info["pwd"]) else: command = ["cd {}; git log -n 1".format(pyenv_info["pwd"])] out_bytes = subprocess.check_output(command, shell=True) out_text = out_bytes.decode("utf-8") except BaseException: out_text = "unknown" pyenv_info["git"] = out_text.strip() if sys_info: pyenv_info["platform"] = platform.platform() return pyenv_info def pretty_print_dict2str(d): """ Pretty print of dictionary d to json-formated string. Parameters: ---------- d : dict Dictionary with module descriptions. Returns: ------- str Resulted string. """ out_text = json.dumps(d, indent=4) return out_text def get_env_stats(packages, pip_packages, python_ver=True, pwd=True, git=True, sys_info=True): """ Get environment statistics. Parameters: ---------- packages : list of str list of package names to inspect only __version__. pip_packages : list of str List of package names to inspect by 'pip show'. python_ver : bool Whether to show python version. pwd : bool, default True Whether to show pwd. git : bool, default True Whether to show git info. sys_info : bool, default True Whether to show platform info. Returns: ------- str Resulted string with information. 
""" package_versions = get_pyenv_info(packages, pip_packages, python_ver, pwd, git, sys_info) return pretty_print_dict2str(package_versions)
5,041
25.397906
111
py
imgclsmob
imgclsmob-master/examples/demo_tf2.py
""" Script for evaluating trained model on TensorFlow 2.0 / ImageNet-1K (demo mode). """ import math import argparse import numpy as np import cv2 import tensorflow as tf from gluoncv.data import ImageNet1kAttr from tf2cv.model_provider import get_model as tf2cv_get_model def parse_args(): """ Create python script parameters. Returns: ------- ArgumentParser Resulted args. """ parser = argparse.ArgumentParser( description="Evaluate an ImageNet-1K model on TensorFlow 2.0 (demo mode)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--image", type=str, required=True, help="path to testing image") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "--input-size", type=int, default=224, help="size of the input for model") parser.add_argument( "--resize-inv-factor", type=float, default=0.875, help="inverted ratio for input image crop") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=(0.485, 0.456, 0.406), help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=(0.229, 0.224, 0.225), help="STD of RGB channels in the dataset") args = parser.parse_args() return args def main(): """ Main body of script. 
""" args = parse_args() # Load a testing image: image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR) # cv2.imshow("image", image) # cv2.waitKey(0) # cv2.destroyAllWindows() image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB) # Resize image with keeping aspect ratio: resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor)) h, w = image.shape[:2] if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)): resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value) image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR) # Center crop of the image: h, w = image.shape[:2] th, tw = args.input_size, args.input_size ih = int(round(0.5 * (h - th))) jw = int(round(0.5 * (w - tw))) image = image[ih:(ih + th), jw:(jw + tw), :] # cv2.imshow("image2", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # Convert image to a float tensor and normalize it: x = image.astype(np.float32) x = x / 255.0 x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb) # Set No-GPU mode: if args.num_gpus == 0: tf.config.set_visible_devices([], "GPU") # Convert the tensor to a TF tensor: x = np.expand_dims(x, axis=0) x = tf.convert_to_tensor(x, dtype=np.float32) # Create model with loading pretrained weights: net = tf2cv_get_model(args.model, pretrained=True) # Evaluate the network: y = net(x) probs = tf.nn.softmax(y) # Show results: top_k = 5 probs_np = probs.numpy().squeeze(axis=0) top_k_inds = probs_np.argsort()[::-1][:top_k] classes = ImageNet1kAttr().classes print("The input picture is classified to be:") for k in range(top_k): print("{idx}: [{class_name}], with probability {prob:.3f}.".format( idx=(k + 1), class_name=classes[top_k_inds[k]], prob=probs_np[top_k_inds[k]])) if __name__ == "__main__": main()
3,799
27.571429
119
py
imgclsmob
imgclsmob-master/examples/convert_tf2_to_tfl.py
""" Script for converting trained model from TensorFlow 2.0 to TensorFlow Lite. """ import argparse import numpy as np import tensorflow as tf from tf2cv.model_provider import get_model as tf2cv_get_model from tensorflow2.utils import prepare_model def parse_args(): """ Create python script parameters. Returns: ------- ArgumentParser Resulted args. """ parser = argparse.ArgumentParser( description="Converting a model from TensorFlow 2.0 to TensorFlow Lite", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--input", type=str, help="path to model weights") parser.add_argument( "--input-shape", type=int, default=(1, 640, 480, 3), help="input tensor shape") parser.add_argument( "--output-dir", type=str, help="path to dir for output TFL file") args = parser.parse_args() return args def main(): """ Main body of script. """ gpus = tf.config.experimental.list_physical_devices("GPU") if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) args = parse_args() if args.input: net_extra_kwargs = {"in_size": args.input_shape[1:3]} model = prepare_model( model_name=args.model, use_pretrained=False, pretrained_model_file_path=args.input, net_extra_kwargs=net_extra_kwargs) else: model = tf2cv_get_model( args.model, pretrained=True) x = tf.zeros(shape=args.input_shape) _ = model.predict(x) # Convert the model. 
converter = tf.lite.TFLiteConverter.from_keras_model(model) # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY] # converter.optimizations = [tf.lite.Optimize.DEFAULT] # dataset = np.load(args.dataset) # def representative_dataset_gen(): # for i in range(len(dataset)): # yield [dataset[i:i + 1]] # converter.representative_dataset = representative_dataset_gen # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] # converter.inference_input_type = tf.int8 # converter.inference_output_type = tf.int8 # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS] tflite_model = converter.convert() if args.output_dir is not None: open("{}/{}.tflite".format(args.output_dir, args.model), "wb").write(tflite_model) # Load TFLite model and allocate tensors. interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() # Get input and output tensors. input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() # Test the TensorFlow Lite model on random input data. input_shape = input_details[0]["shape"] input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32) interpreter.set_tensor(input_details[0]["index"], input_data) interpreter.invoke() # The function `get_tensor()` returns a copy of the tensor data. # Use `tensor()` in order to get a pointer to the tensor. tflite_results = interpreter.get_tensor(output_details[0]["index"]) # Test the TensorFlow model on random input data. tf_results = model(tf.constant(input_data)) # Compare the result. for tf_result, tflite_result in zip(tf_results, tflite_results): np.testing.assert_almost_equal(tf_result[0], tflite_result, decimal=5) print("All OK.") if __name__ == "__main__": main()
3,835
29.204724
106
py
imgclsmob
imgclsmob-master/examples/demo_pt.py
""" Script for evaluating trained model on PyTorch / ImageNet-1K (demo mode). """ import math import argparse import numpy as np import cv2 import torch from gluoncv.data import ImageNet1kAttr from pytorchcv.model_provider import get_model as ptcv_get_model def parse_args(): """ Create python script parameters. Returns: ------- ArgumentParser Resulted args. """ parser = argparse.ArgumentParser( description="Evaluate an ImageNet-1K model on PyTorch (demo mode)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--image", type=str, required=True, help="path to testing image") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "--input-size", type=int, default=224, help="size of the input for model") parser.add_argument( "--resize-inv-factor", type=float, default=0.875, help="inverted ratio for input image crop") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=(0.485, 0.456, 0.406), help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=(0.229, 0.224, 0.225), help="STD of RGB channels in the dataset") args = parser.parse_args() return args def main(): """ Main body of script. 
""" args = parse_args() # Load a testing image: image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR) # cv2.imshow("image", image) # cv2.waitKey(0) # cv2.destroyAllWindows() image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB) # Resize image with keeping aspect ratio: resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor)) h, w = image.shape[:2] if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)): resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value) image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR) # Center crop of the image: h, w = image.shape[:2] th, tw = args.input_size, args.input_size ih = int(round(0.5 * (h - th))) jw = int(round(0.5 * (w - tw))) image = image[ih:(ih + th), jw:(jw + tw), :] # cv2.imshow("image2", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # Convert image to a float tensor and normalize it: x = image.astype(np.float32) x = x / 255.0 x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb) # Create `use_cuda` flag: use_cuda = (args.num_gpus > 0) # Convert the tensor to a Pytorch tensor: x = x.transpose(2, 0, 1) x = np.expand_dims(x, axis=0) x = torch.FloatTensor(x) if use_cuda: x = x.cuda() # Create model with loading pretrained weights: net = ptcv_get_model(args.model, pretrained=True) net.eval() if use_cuda: net = net.cuda() # Evaluate the network: y = net(x) probs = torch.nn.Softmax(dim=-1)(y) # Show results: top_k = 5 probs_np = probs.cpu().detach().numpy().squeeze(axis=0) top_k_inds = probs_np.argsort()[::-1][:top_k] classes = ImageNet1kAttr().classes print("The input picture is classified to be:") for k in range(top_k): print("{idx}: [{class_name}], with probability {prob:.3f}.".format( idx=(k + 1), class_name=classes[top_k_inds[k]], prob=probs_np[top_k_inds[k]])) if __name__ == "__main__": main()
3,876
27.094203
119
py
imgclsmob
imgclsmob-master/examples/demo_gl.py
""" Script for evaluating trained model on MXNet/Gluon / ImageNet-1K (demo mode). """ import math import argparse import numpy as np import cv2 import mxnet as mx from gluoncv.data import ImageNet1kAttr from gluoncv2.model_provider import get_model as glcv2_get_model def parse_args(): """ Create python script parameters. Returns: ------- ArgumentParser Resulted args. """ parser = argparse.ArgumentParser( description="Evaluate an ImageNet-1K model on Gluon (demo mode)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--image", type=str, required=True, help="path to testing image") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "--input-size", type=int, default=224, help="size of the input for model") parser.add_argument( "--resize-inv-factor", type=float, default=0.875, help="inverted ratio for input image crop") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=(0.485, 0.456, 0.406), help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=(0.229, 0.224, 0.225), help="STD of RGB channels in the dataset") args = parser.parse_args() return args def main(): """ Main body of script. 
""" args = parse_args() # Load a testing image: image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR) # cv2.imshow("image", image) # cv2.waitKey(0) # cv2.destroyAllWindows() image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB) # Resize image with keeping aspect ratio: resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor)) h, w = image.shape[:2] if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)): resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value) image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR) # Center crop of the image: h, w = image.shape[:2] th, tw = args.input_size, args.input_size ih = int(round(0.5 * (h - th))) jw = int(round(0.5 * (w - tw))) image = image[ih:(ih + th), jw:(jw + tw), :] # cv2.imshow("image2", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # Convert image to a float tensor and normalize it: x = image.astype(np.float32) x = x / 255.0 x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb) # Create MXNet context: mx_ctx = [mx.gpu(i) for i in range(args.num_gpus)] if args.num_gpus > 0 else [mx.cpu()] # Convert the tensor to a MXNet nd-array: x = x.transpose(2, 0, 1) x = np.expand_dims(x, axis=0) x = mx.nd.array(x, ctx=mx.cpu()) # Create model with loading pretrained weights: net = glcv2_get_model(args.model, pretrained=True, ctx=mx_ctx) # Evaluate the network: y = net(x) probs = mx.nd.softmax(y) # Show results: top_k = 5 probs_np = probs.asnumpy().squeeze(axis=0) top_k_inds = probs_np.argsort()[::-1][:top_k] classes = ImageNet1kAttr().classes print("The input picture is classified to be:") for k in range(top_k): print("{idx}: [{class_name}], with probability {prob:.3f}.".format( idx=(k + 1), class_name=classes[top_k_inds[k]], prob=probs_np[top_k_inds[k]])) if __name__ == "__main__": main()
3,841
27.887218
119
py
imgclsmob
imgclsmob-master/other/train_pt_cifar-.py
import argparse import time import logging import os import warnings import random import numpy as np import torch.nn as nn import torch.backends.cudnn as cudnn import torch.utils.data from common.logger_utils import initialize_logging from common.train_log_param_saver import TrainLogParamSaver from pytorch.cifar1 import add_dataset_parser_arguments, get_train_data_loader, get_val_data_loader from pytorch.utils import prepare_pt_context, prepare_model, validate1, accuracy, AverageMeter def parse_args(): parser = argparse.ArgumentParser( description='Train a model for image classification (PyTorch/CIFAR)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="CIFAR10", help='dataset name. options are CIFAR10 and CIFAR100') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--resume-state', type=str, default='', help='resume from previously saved optimizer state if not None') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=128, help='training batch size per device (CPU/GPU).') parser.add_argument( '--num-epochs', type=int, default=3, help='number of training epochs.') parser.add_argument( '--start-epoch', type=int, default=1, help='starting epoch for resuming, default is 1 for new training') parser.add_argument( '--attempt', type=int, default=1, help='current number of training') 
parser.add_argument( '--optimizer-name', type=str, default='nag', help='optimizer name') parser.add_argument( '--lr', type=float, default=0.1, help='learning rate. default is 0.1.') parser.add_argument( '--lr-mode', type=str, default='step', help='learning rate scheduler mode. options are step, poly and cosine.') parser.add_argument( '--lr-decay', type=float, default=0.1, help='decay rate of learning rate. default is 0.1.') parser.add_argument( '--lr-decay-period', type=int, default=0, help='interval for periodic learning rate decays. default is 0 to disable.') parser.add_argument( '--lr-decay-epoch', type=str, default='40,60', help='epoches at which learning rate decays. default is 40,60.') parser.add_argument( '--warmup-lr', type=float, default=0.0, help='starting warmup learning rate. default is 0.0.') parser.add_argument( '--warmup-epochs', type=int, default=0, help='number of warmup epochs.') parser.add_argument( '--momentum', type=float, default=0.9, help='momentum value for optimizer, default is 0.9.') parser.add_argument( '--wd', type=float, default=0.0001, help='weight decay rate. 
default is 0.0001.') parser.add_argument( '--log-interval', type=int, default=200, help='number of batches to wait before logging.') parser.add_argument( '--save-interval', type=int, default=4, help='saving parameters epoch interval, best model will always be saved') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--seed', type=int, default=-1, help='Random seed to be fixed') parser.add_argument( '--log-packages', type=str, default='torch, torchvision', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='', help='list of pip packages for logging') args = parser.parse_args() return args def init_rand(seed): if seed <= 0: seed = np.random.randint(10000) else: cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! 
' 'You may see unexpected behavior when restarting ' 'from checkpoints.') random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) return seed def prepare_trainer(net, optimizer_name, wd, momentum, lr_mode, lr, lr_decay_period, lr_decay_epoch, lr_decay, # warmup_epochs, # batch_size, num_epochs, # num_training_samples, state_file_path): optimizer_name = optimizer_name.lower() if (optimizer_name == 'sgd') or (optimizer_name == 'nag'): optimizer = torch.optim.SGD( params=net.parameters(), lr=lr, momentum=momentum, weight_decay=wd, nesterov=(optimizer_name == 'nag')) else: raise ValueError("Usupported optimizer: {}".format(optimizer_name)) if state_file_path: checkpoint = torch.load(state_file_path) if type(checkpoint) == dict: optimizer.load_state_dict(checkpoint['optimizer']) start_epoch = checkpoint['epoch'] else: start_epoch = None else: start_epoch = None cudnn.benchmark = True lr_mode = lr_mode.lower() if lr_decay_period > 0: lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period)) else: lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')] if (lr_mode == 'step') and (lr_decay_period != 0): lr_scheduler = torch.optim.lr_scheduler.StepLR( optimizer=optimizer, step_size=lr_decay_period, gamma=lr_decay, last_epoch=-1) elif (lr_mode == 'multistep') or ((lr_mode == 'step') and (lr_decay_period == 0)): lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( optimizer=optimizer, milestones=lr_decay_epoch, gamma=lr_decay, last_epoch=-1) elif lr_mode == 'cosine': for group in optimizer.param_groups: group.setdefault('initial_lr', group['lr']) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer=optimizer, T_max=num_epochs, last_epoch=(num_epochs - 1)) else: raise ValueError("Usupported lr_scheduler: {}".format(lr_mode)) return optimizer, lr_scheduler, start_epoch def save_params(file_stem, state): torch.save( obj=state['state_dict'], f=(file_stem + '.pth')) torch.save( obj=state, f=(file_stem + '.states')) def 
train_epoch(epoch, acc_metric_train, net, train_data, use_cuda, L, optimizer, # lr_scheduler, batch_size, log_interval): tic = time.time() net.train() acc_metric_train.reset() train_loss = 0.0 btic = time.time() for i, (data, target) in enumerate(train_data): if use_cuda: data = data.cuda(non_blocking=True) target = target.cuda(non_blocking=True) output = net(data) loss = L(output, target) optimizer.zero_grad() loss.backward() optimizer.step() train_loss += loss.item() acc_train_value = accuracy(output, target, topk=(1, )) acc_metric_train.update(acc_train_value[0], data.size(0)) if log_interval and not (i + 1) % log_interval: acc_train_value = acc_metric_train.avg.item() err_train_value = 1.0 - acc_train_value speed = batch_size * log_interval / (time.time() - btic) logging.info('Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\terr={:.4f}\tlr={:.4f}'.format( epoch + 1, i, speed, err_train_value, optimizer.param_groups[0]['lr'])) btic = time.time() acc_train_value = acc_metric_train.avg.item() err_train_value = 1.0 - acc_train_value train_loss /= (i + 1) throughput = int(batch_size * (i + 1) / (time.time() - tic)) logging.info('[Epoch {}] training: err={:.4f}\tloss={:.4f}'.format( epoch + 1, err_train_value, train_loss)) logging.info('[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec'.format( epoch + 1, throughput, time.time() - tic)) return err_train_value, train_loss def train_net(batch_size, num_epochs, start_epoch1, train_data, val_data, net, optimizer, lr_scheduler, lp_saver, log_interval, use_cuda): acc_metric_val = AverageMeter() acc_metric_train = AverageMeter() L = nn.CrossEntropyLoss() if use_cuda: L = L.cuda() assert (type(start_epoch1) == int) assert (start_epoch1 >= 1) if start_epoch1 > 1: logging.info('Start training from [Epoch {}]'.format(start_epoch1)) err_val = validate1( accuracy_metric=acc_metric_val, net=net, val_data=val_data, use_cuda=use_cuda) logging.info('[Epoch {}] validation: err={:.4f}'.format( start_epoch1 - 1, err_val)) gtic 
= time.time() for epoch in range(start_epoch1 - 1, num_epochs): lr_scheduler.step() err_train, train_loss = train_epoch( epoch, acc_metric_train, net, train_data, use_cuda, L, optimizer, # lr_scheduler, batch_size, log_interval) err_val = validate1( accuracy_metric=acc_metric_val, net=net, val_data=val_data, use_cuda=use_cuda) logging.info('[Epoch {}] validation: err={:.4f}'.format( epoch + 1, err_val)) if lp_saver is not None: state = { 'epoch': epoch + 1, 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict(), } lp_saver_kwargs = {'state': state} lp_saver.epoch_test_end_callback( epoch1=(epoch + 1), params=[err_val, err_train, train_loss, optimizer.param_groups[0]['lr']], **lp_saver_kwargs) logging.info('Total time cost: {:.2f} sec'.format(time.time() - gtic)) if lp_saver is not None: logging.info('Best err: {:.4f} at {} epoch'.format( lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch)) def main(): args = parse_args() args.seed = init_rand(seed=args.seed) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) use_cuda, batch_size = prepare_pt_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_cuda=use_cuda) train_data = get_train_data_loader( dataset_name=args.dataset, dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers) val_data = get_val_data_loader( dataset_name=args.dataset, dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers) # num_training_samples = 1281167 optimizer, lr_scheduler, start_epoch = prepare_trainer( net=net, optimizer_name=args.optimizer_name, wd=args.wd, momentum=args.momentum, lr_mode=args.lr_mode, lr=args.lr, lr_decay_period=args.lr_decay_period, 
lr_decay_epoch=args.lr_decay_epoch, lr_decay=args.lr_decay, # warmup_epochs=args.warmup_epochs, # batch_size=batch_size, num_epochs=args.num_epochs, # num_training_samples=num_training_samples, state_file_path=args.resume_state) # if start_epoch is not None: # args.start_epoch = start_epoch if args.save_dir and args.save_interval: lp_saver = TrainLogParamSaver( checkpoint_file_name_prefix='{}_{}'.format(args.dataset.lower(), args.model), last_checkpoint_file_name_suffix="last", best_checkpoint_file_name_suffix=None, last_checkpoint_dir_path=args.save_dir, best_checkpoint_dir_path=None, last_checkpoint_file_count=2, best_checkpoint_file_count=2, checkpoint_file_save_callback=save_params, checkpoint_file_exts=('.pth', '.states'), save_interval=args.save_interval, num_epochs=args.num_epochs, param_names=['Val.Err', 'Train.Err', 'Train.Loss', 'LR'], acc_ind=0, # bigger=[True], # mask=None, score_log_file_path=os.path.join(args.save_dir, 'score.log'), score_log_attempt_value=args.attempt, best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log')) else: lp_saver = None train_net( batch_size=batch_size, num_epochs=args.num_epochs, start_epoch1=args.start_epoch, train_data=train_data, val_data=val_data, net=net, optimizer=optimizer, lr_scheduler=lr_scheduler, lp_saver=lp_saver, log_interval=args.log_interval, use_cuda=use_cuda) if __name__ == '__main__': main()
15,172
30.092213
105
py
imgclsmob
imgclsmob-master/other/train_gl_seg.py
import os import shutil import argparse from tqdm import tqdm import mxnet as mx from mxnet import gluon, autograd from mxnet.gluon.data.vision import transforms import gluoncv from gluoncv.loss import MixSoftmaxCrossEntropyLoss from gluoncv.utils import LRScheduler from gluoncv.model_zoo.segbase import get_segmentation_model, SegEvalModel from gluoncv.model_zoo import get_model from gluoncv.utils.parallel import DataParallelModel, DataParallelCriterion from gluoncv.data import get_segmentation_dataset def parse_args(): """Training Options for Segmentation Experiments""" parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation') parser.add_argument('--model', type=str, default='fcn', help='model name (default: fcn)') parser.add_argument('--backbone', type=str, default='resnet50', help='backbone name (default: resnet50)') parser.add_argument('--dataset', type=str, default='pascalaug', help='dataset name (default: pascal)') parser.add_argument('--dataset-dir', type=str, default='../imgclsmob_data/voc', help='dataset path') parser.add_argument('--workers', type=int, default=16, metavar='N', help='dataloader threads') parser.add_argument('--base-size', type=int, default=520, help='base image size') parser.add_argument('--crop-size', type=int, default=480, help='crop image size') parser.add_argument('--train-split', type=str, default='train', help='dataset train split (default: train)') parser.add_argument('--aux', action='store_true', default=False, help='Auxiliary loss') parser.add_argument('--aux-weight', type=float, default=0.5, help='auxiliary loss weight') parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)') parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)') parser.add_argument('--batch-size', type=int, default=16, metavar='N', help='input batch size for training (default: 16)') parser.add_argument('--test-batch-size', type=int, 
default=16, metavar='N', help='input batch size for testing (default: 32)') parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', help='learning rate (default: 1e-3)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=1e-4, metavar='M', help='w-decay (default: 1e-4)') parser.add_argument('--no-wd', action='store_true', help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--ngpus', type=int, default=len(mx.test_utils.list_gpus()), help='number of GPUs (default: 4)') parser.add_argument('--kvstore', type=str, default='device', help='kvstore to use for trainer/module.') parser.add_argument('--dtype', type=str, default='float32', help='data type for training. default is float32') # checking point parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed') parser.add_argument('--checkname', type=str, default='default', help='set the checkpoint name') parser.add_argument('--model-zoo', type=str, default=None, help='evaluating on model zoo model') # evaluation only parser.add_argument('--eval', action='store_true', default=False, help='evaluation only') parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training') # synchronized Batch Normalization parser.add_argument('--syncbn', action='store_true', default=False, help='using Synchronized Cross-GPU BatchNorm') # the parser args = parser.parse_args() # handle contexts if args.no_cuda: print('Using CPU') args.kvstore = 'local' args.ctx = [mx.cpu(0)] else: print('Number of GPUs:', args.ngpus) args.ctx = [mx.gpu(i) for i in range(args.ngpus)] # Synchronized BatchNorm args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn else mx.gluon.nn.BatchNorm 
args.norm_kwargs = {'num_devices': args.ngpus} if args.syncbn else {} print(args) return args class Trainer(object): def __init__(self, args): self.args = args # image transform input_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize([.485, .456, .406], [.229, .224, .225]), ]) # dataset and dataloader data_kwargs = { 'transform': input_transform, 'base_size': args.base_size, 'crop_size': args.crop_size, 'root': args.dataset_dir} trainset = get_segmentation_dataset( args.dataset, split=args.train_split, mode='train', **data_kwargs) valset = get_segmentation_dataset( args.dataset, split='val', mode='val', **data_kwargs) self.train_data = gluon.data.DataLoader( trainset, args.batch_size, shuffle=True, last_batch='rollover', num_workers=args.workers) self.eval_data = gluon.data.DataLoader( valset, args.test_batch_size, last_batch='rollover', num_workers=args.workers) # create network if args.model_zoo is not None: model = get_model(args.model_zoo, pretrained=True) else: model = get_segmentation_model( model=args.model, dataset=args.dataset, backbone=args.backbone, norm_layer=args.norm_layer, norm_kwargs=args.norm_kwargs, aux=args.aux, crop_size=args.crop_size) model.cast(args.dtype) print(model) self.net = DataParallelModel(model, args.ctx, args.syncbn) self.evaluator = DataParallelModel(SegEvalModel(model), args.ctx) # resume checkpoint if needed if args.resume is not None: if os.path.isfile(args.resume): model.load_parameters(args.resume, ctx=args.ctx) else: raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume)) # create criterion criterion = MixSoftmaxCrossEntropyLoss(args.aux, aux_weight=args.aux_weight) self.criterion = DataParallelCriterion(criterion, args.ctx, args.syncbn) # optimizer and lr scheduling self.lr_scheduler = LRScheduler( mode='poly', base_lr=args.lr, nepochs=args.epochs, iters_per_epoch=len(self.train_data), power=0.9) kv = mx.kv.create(args.kvstore) optimizer_params = { 'lr_scheduler': 
self.lr_scheduler, 'wd': args.weight_decay, 'momentum': args.momentum} if args.dtype == 'float16': optimizer_params['multi_precision'] = True if args.no_wd: for k, v in self.net.module.collect_params('.*beta|.*gamma|.*bias').items(): v.wd_mult = 0.0 self.optimizer = gluon.Trainer( self.net.module.collect_params(), 'sgd', optimizer_params, kvstore=kv) # evaluation metrics self.metric = gluoncv.utils.metrics.SegmentationMetric(trainset.num_class) def training(self, epoch): tbar = tqdm(self.train_data) train_loss = 0.0 for i, (data, target) in enumerate(tbar): with autograd.record(True): outputs = self.net(data.astype(args.dtype, copy=False)) losses = self.criterion(outputs, target) mx.nd.waitall() autograd.backward(losses) self.optimizer.step(self.args.batch_size) for loss in losses: train_loss += loss.asnumpy()[0] / len(losses) tbar.set_description('Epoch {}, training loss {}'.format(epoch, train_loss / (i + 1))) mx.nd.waitall() # save every epoch save_checkpoint(self.net.module, self.args, False) def validation(self, epoch): self.metric.reset() tbar = tqdm(self.eval_data) for i, (data, target) in enumerate(tbar): outputs = self.evaluator(data.astype(args.dtype, copy=False)) outputs = [x[0] for x in outputs] targets = mx.gluon.utils.split_and_load(target, args.ctx, even_split=False) self.metric.update(targets, outputs) pixAcc, mIoU = self.metric.get() tbar.set_description('Epoch {}, validation pixAcc: {}, mIoU: {}'.format(epoch, pixAcc, mIoU)) mx.nd.waitall() def save_checkpoint(net, args, is_best=False): """Save Checkpoint""" directory = "../imgclsmob_data/{}/{}/{}/".format(args.dataset, args.model, args.checkname) if not os.path.exists(directory): os.makedirs(directory) filename = 'checkpoint.params' filename = directory + filename net.save_parameters(filename) if is_best: shutil.copyfile(filename, directory + 'model_best.params') if __name__ == "__main__": args = parse_args() trainer = Trainer(args) if args.eval: print('Evaluating model: ', args.resume) 
trainer.validation(args.start_epoch) else: print('Starting Epoch:', args.start_epoch) print('Total Epochs:', args.epochs) for epoch in range(args.start_epoch, args.epochs): trainer.training(epoch) if not trainer.args.no_val: trainer.validation(epoch)
9,856
43.201794
120
py
imgclsmob
imgclsmob-master/other/train_gl_cifar-.py
import argparse import time import logging import os import numpy as np import random import mxnet as mx from mxnet import gluon from mxnet import autograd as ag from common.logger_utils import initialize_logging from common.train_log_param_saver import TrainLogParamSaver from gluon.lr_scheduler import LRScheduler from gluon.utils import prepare_mx_context, prepare_model, validate, report_accuracy, get_composite_metric from gluon.dataset_utils import get_dataset_metainfo from gluon.dataset_utils import get_batch_fn from gluon.dataset_utils import get_train_data_source from gluon.dataset_utils import get_val_data_source def parse_args(): parser = argparse.ArgumentParser( description='Train a model for image classification (Gluon/CIFAR)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="CIFAR10", help='dataset name. options are CIFAR10 and CIFAR100') parser.add_argument( "--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"), help="path to working directory only for dataset root path preset") args, _ = parser.parse_known_args() dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset) dataset_metainfo.add_dataset_parser_arguments( parser=parser, work_dir_path=args.work_dir) parser.add_argument( '--model', type=str, required=True, help='type of model to use. 
see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--dtype', type=str, default='float32', help='data type for training') parser.add_argument( '--not-hybridize', action='store_true', help='do not hybridize model') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--resume-state', type=str, default='', help='resume from previously saved optimizer state if not None') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=128, help='training batch size per device (CPU/GPU).') parser.add_argument( '--batch-size-scale', type=int, default=1, help='manual batch-size increasing factor.') parser.add_argument( '--num-epochs', type=int, default=200, help='number of training epochs.') parser.add_argument( '--start-epoch', type=int, default=1, help='starting epoch for resuming, default is 1 for new training') parser.add_argument( '--attempt', type=int, default=1, help='current number of training') parser.add_argument( '--optimizer-name', type=str, default='nag', help='optimizer name') parser.add_argument( '--lr', type=float, default=0.1, help='learning rate') parser.add_argument( '--lr-mode', type=str, default='cosine', help='learning rate scheduler mode. options are step, poly and cosine') parser.add_argument( '--lr-decay', type=float, default=0.1, help='decay rate of learning rate') parser.add_argument( '--lr-decay-period', type=int, default=0, help='interval for periodic learning rate decays. 
default is 0 to disable.') parser.add_argument( '--lr-decay-epoch', type=str, default='40,60', help='epoches at which learning rate decays') parser.add_argument( '--target-lr', type=float, default=1e-8, help='ending learning rate') parser.add_argument( '--poly-power', type=float, default=2, help='power value for poly LR scheduler') parser.add_argument( '--warmup-epochs', type=int, default=0, help='number of warmup epochs.') parser.add_argument( '--warmup-lr', type=float, default=1e-8, help='starting warmup learning rate') parser.add_argument( '--warmup-mode', type=str, default='linear', help='learning rate scheduler warmup mode. options are linear, poly and constant') parser.add_argument( '--momentum', type=float, default=0.9, help='momentum value for optimizer') parser.add_argument( '--wd', type=float, default=0.0001, help='weight decay rate') parser.add_argument( '--gamma-wd-mult', type=float, default=1.0, help='weight decay multiplier for batchnorm gamma') parser.add_argument( '--beta-wd-mult', type=float, default=1.0, help='weight decay multiplier for batchnorm beta') parser.add_argument( '--bias-wd-mult', type=float, default=1.0, help='weight decay multiplier for bias') parser.add_argument( '--grad-clip', type=float, default=None, help='max_norm for gradient clipping') parser.add_argument( '--label-smoothing', action='store_true', help='use label smoothing') parser.add_argument( '--mixup', action='store_true', help='use mixup strategy') parser.add_argument( '--mixup-epoch-tail', type=int, default=20, help='number of epochs without mixup at the end of training') parser.add_argument( '--log-interval', type=int, default=200, help='number of batches to wait before logging.') parser.add_argument( '--save-interval', type=int, default=4, help='saving parameters epoch interval, best model will always be saved') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, 
default='train.log', help='filename of training log') parser.add_argument( '--seed', type=int, default=-1, help='Random seed to be fixed') parser.add_argument( '--log-packages', type=str, default='mxnet', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='mxnet-cu100', help='list of pip packages for logging') parser.add_argument( '--tune-layers', type=str, default='', help='Regexp for selecting layers for fine tuning') args = parser.parse_args() return args def init_rand(seed): if seed <= 0: seed = np.random.randint(10000) random.seed(seed) np.random.seed(seed) mx.random.seed(seed) return seed def prepare_trainer(net, optimizer_name, wd, momentum, lr_mode, lr, lr_decay_period, lr_decay_epoch, lr_decay, target_lr, poly_power, warmup_epochs, warmup_lr, warmup_mode, batch_size, num_epochs, num_training_samples, dtype, gamma_wd_mult=1.0, beta_wd_mult=1.0, bias_wd_mult=1.0, state_file_path=None): if gamma_wd_mult != 1.0: for k, v in net.collect_params(".*gamma").items(): v.wd_mult = gamma_wd_mult if beta_wd_mult != 1.0: for k, v in net.collect_params(".*beta").items(): v.wd_mult = beta_wd_mult if bias_wd_mult != 1.0: for k, v in net.collect_params(".*bias").items(): v.wd_mult = bias_wd_mult if lr_decay_period > 0: lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period)) else: lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')] num_batches = num_training_samples // batch_size lr_scheduler = LRScheduler( mode=lr_mode, base_lr=lr, n_iters=num_batches, n_epochs=num_epochs, step=lr_decay_epoch, step_factor=lr_decay, target_lr=target_lr, power=poly_power, warmup_epochs=warmup_epochs, warmup_lr=warmup_lr, warmup_mode=warmup_mode) optimizer_params = {"learning_rate": lr, "wd": wd, "momentum": momentum, "lr_scheduler": lr_scheduler} if dtype != "float32": optimizer_params["multi_precision"] = True trainer = gluon.Trainer( params=net.collect_params(), optimizer=optimizer_name, 
optimizer_params=optimizer_params) if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path): logging.info("Loading trainer states: {}".format(state_file_path)) trainer.load_states(state_file_path) if trainer._optimizer.wd != wd: trainer._optimizer.wd = wd logging.info("Reset the weight decay: {}".format(wd)) # lr_scheduler = trainer._optimizer.lr_scheduler trainer._optimizer.lr_scheduler = lr_scheduler return trainer, lr_scheduler def save_params(file_stem, net, trainer): net.save_parameters(file_stem + ".params") trainer.save_states(file_stem + ".states") def train_epoch(epoch, net, train_metric, train_data, batch_fn, data_source_needs_reset, dtype, ctx, loss_func, trainer, lr_scheduler, batch_size, log_interval, mixup, mixup_epoch_tail, label_smoothing, num_classes, num_epochs, grad_clip_value, batch_size_scale): labels_list_inds = None batch_size_extend_count = 0 tic = time.time() if data_source_needs_reset: train_data.reset() train_metric.reset() train_loss = 0.0 btic = time.time() for i, batch in enumerate(train_data): data_list, labels_list = batch_fn(batch, ctx) if mixup: labels_list_inds = labels_list labels_list = [Y.one_hot(depth=num_classes) for Y in labels_list] if epoch < num_epochs - mixup_epoch_tail: alpha = 1 lam = np.random.beta(alpha, alpha) data_list = [lam * X + (1 - lam) * X[::-1] for X in data_list] labels_list = [lam * Y + (1 - lam) * Y[::-1] for Y in labels_list] elif label_smoothing: eta = 0.1 on_value = 1 - eta + eta / num_classes off_value = eta / num_classes labels_list_inds = labels_list labels_list = [Y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value) for Y in labels_list] with ag.record(): outputs_list = [net(X.astype(dtype, copy=False)) for X in data_list] loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)] for loss in loss_list: loss.backward() lr_scheduler.update(i, epoch) if grad_clip_value is not None: grads = [v.grad(ctx[0]) 
for v in net.collect_params().values() if v._grad is not None] gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value) if batch_size_scale == 1: trainer.step(batch_size) else: if (i + 1) % batch_size_scale == 0: batch_size_extend_count = 0 trainer.step(batch_size * batch_size_scale) for p in net.collect_params().values(): p.zero_grad() else: batch_size_extend_count += 1 train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list) train_metric.update( src_pts=(labels_list if not (mixup or label_smoothing) else labels_list_inds), dst_pts=outputs_list) if log_interval and not (i + 1) % log_interval: speed = batch_size * log_interval / (time.time() - btic) btic = time.time() train_accuracy_msg = report_accuracy(metric=train_metric) logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format( epoch + 1, i, speed, train_accuracy_msg, trainer.learning_rate)) if (batch_size_scale != 1) and (batch_size_extend_count > 0): trainer.step(batch_size * batch_size_extend_count) for p in net.collect_params().values(): p.zero_grad() throughput = int(batch_size * (i + 1) / (time.time() - tic)) logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format( epoch + 1, throughput, time.time() - tic)) train_loss /= (i + 1) train_accuracy_msg = report_accuracy(metric=train_metric) logging.info("[Epoch {}] training: {}\tloss={:.4f}".format( epoch + 1, train_accuracy_msg, train_loss)) return train_loss def train_net(batch_size, num_epochs, start_epoch1, train_data, val_data, batch_fn, data_source_needs_reset, dtype, net, trainer, lr_scheduler, lp_saver, log_interval, mixup, mixup_epoch_tail, label_smoothing, num_classes, grad_clip_value, batch_size_scale, val_metric, train_metric, opt_metric_name, ctx): assert (not (mixup and label_smoothing)) if batch_size_scale != 1: for p in net.collect_params().values(): p.grad_req = "add" if isinstance(ctx, mx.Context): ctx = [ctx] loss_func = 
gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing))) assert (type(start_epoch1) == int) assert (start_epoch1 >= 1) if start_epoch1 > 1: logging.info("Start training from [Epoch {}]".format(start_epoch1)) validate( metric=val_metric, net=net, val_data=val_data, batch_fn=batch_fn, data_source_needs_reset=data_source_needs_reset, dtype=dtype, ctx=ctx) val_accuracy_msg = report_accuracy(metric=val_metric) logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg)) gtic = time.time() for epoch in range(start_epoch1 - 1, num_epochs): train_loss = train_epoch( epoch=epoch, net=net, train_metric=train_metric, train_data=train_data, batch_fn=batch_fn, data_source_needs_reset=data_source_needs_reset, dtype=dtype, ctx=ctx, loss_func=loss_func, trainer=trainer, lr_scheduler=lr_scheduler, batch_size=batch_size, log_interval=log_interval, mixup=mixup, mixup_epoch_tail=mixup_epoch_tail, label_smoothing=label_smoothing, num_classes=num_classes, num_epochs=num_epochs, grad_clip_value=grad_clip_value, batch_size_scale=batch_size_scale) validate( metric=val_metric, net=net, val_data=val_data, batch_fn=batch_fn, data_source_needs_reset=data_source_needs_reset, dtype=dtype, ctx=ctx) val_accuracy_msg = report_accuracy(metric=val_metric) logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg)) if lp_saver is not None: lp_saver_kwargs = {"net": net, "trainer": trainer} val_acc_values = val_metric.get()[1] train_acc_values = train_metric.get()[1] val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values] train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values] lp_saver.epoch_test_end_callback( epoch1=(epoch + 1), params=(val_acc_values + train_acc_values + [train_loss, trainer.learning_rate]), **lp_saver_kwargs) logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic)) if lp_saver is not None: logging.info("Best {}: {:.4f} at {} 
epoch".format( opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch)) def main(): args = parse_args() args.seed = init_rand(seed=args.seed) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) ctx, batch_size = prepare_mx_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), dtype=args.dtype, tune_layers=args.tune_layers, classes=args.num_classes, in_channels=args.in_channels, do_hybridize=(not args.not_hybridize), ctx=ctx) assert (hasattr(net, "classes")) num_classes = net.classes ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset) ds_metainfo.update(args=args) train_data = get_train_data_source( ds_metainfo=ds_metainfo, batch_size=batch_size, num_workers=args.num_workers) val_data = get_val_data_source( ds_metainfo=ds_metainfo, batch_size=batch_size, num_workers=args.num_workers) batch_fn = get_batch_fn(use_imgrec=ds_metainfo.use_imgrec) num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples trainer, lr_scheduler = prepare_trainer( net=net, optimizer_name=args.optimizer_name, wd=args.wd, momentum=args.momentum, lr_mode=args.lr_mode, lr=args.lr, lr_decay_period=args.lr_decay_period, lr_decay_epoch=args.lr_decay_epoch, lr_decay=args.lr_decay, target_lr=args.target_lr, poly_power=args.poly_power, warmup_epochs=args.warmup_epochs, warmup_lr=args.warmup_lr, warmup_mode=args.warmup_mode, batch_size=batch_size, num_epochs=args.num_epochs, num_training_samples=num_training_samples, dtype=args.dtype, gamma_wd_mult=args.gamma_wd_mult, beta_wd_mult=args.beta_wd_mult, bias_wd_mult=args.bias_wd_mult, state_file_path=args.resume_state) if args.save_dir and args.save_interval: param_names = 
ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"] lp_saver = TrainLogParamSaver( checkpoint_file_name_prefix="{}_{}".format(args.dataset.lower(), args.model), last_checkpoint_file_name_suffix="last", best_checkpoint_file_name_suffix=None, last_checkpoint_dir_path=args.save_dir, best_checkpoint_dir_path=None, last_checkpoint_file_count=2, best_checkpoint_file_count=2, checkpoint_file_save_callback=save_params, checkpoint_file_exts=(".params", ".states"), save_interval=args.save_interval, num_epochs=args.num_epochs, param_names=param_names, acc_ind=ds_metainfo.saver_acc_ind, # bigger=[True], # mask=None, score_log_file_path=os.path.join(args.save_dir, "score.log"), score_log_attempt_value=args.attempt, best_map_log_file_path=os.path.join(args.save_dir, "best_map.log")) else: lp_saver = None train_net( batch_size=batch_size, num_epochs=args.num_epochs, start_epoch1=args.start_epoch, train_data=train_data, val_data=val_data, batch_fn=batch_fn, data_source_needs_reset=ds_metainfo.use_imgrec, dtype=args.dtype, net=net, trainer=trainer, lr_scheduler=lr_scheduler, lp_saver=lp_saver, log_interval=args.log_interval, mixup=args.mixup, mixup_epoch_tail=args.mixup_epoch_tail, label_smoothing=args.label_smoothing, num_classes=num_classes, grad_clip_value=args.grad_clip, batch_size_scale=args.batch_size_scale, val_metric=get_composite_metric(ds_metainfo.val_metric_names), train_metric=get_composite_metric(ds_metainfo.train_metric_names), opt_metric_name=ds_metainfo.val_metric_names[ds_metainfo.saver_acc_ind], ctx=ctx) if __name__ == "__main__": main()
22,007
31.798808
119
py
imgclsmob
imgclsmob-master/other/eval_ch_cifar-.py
import argparse import time import logging import numpy as np from chainer import cuda, global_config import chainer.functions as F from chainercv.utils import apply_to_iterator from chainercv.utils import ProgressHook from common.logger_utils import initialize_logging from chainer_.utils import prepare_model from chainer_.cifar1 import add_dataset_parser_arguments from chainer_.cifar1 import get_val_data_iterator from chainer_.cifar1 import CIFARPredictor def parse_args(): parser = argparse.ArgumentParser( description='Evaluate a model for image classification (Chainer/CIFAR/SVHN)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="CIFAR10", help='dataset name. options are CIFAR10, CIFAR100, and SVHN') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=32, help='training batch size per device (CPU/GPU).') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', type=str, default='chainer, chainercv', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='cupy-cuda92, cupy-cuda100, chainer, chainercv', help='list of pip 
packages for logging') args = parser.parse_args() return args def test(net, val_iterator, val_dataset_len, num_gpus, calc_weight_count=False, extended_log=False): tic = time.time() predictor = CIFARPredictor(base_model=net) if num_gpus > 0: predictor.to_gpu() if calc_weight_count: weight_count = net.count_params() logging.info('Model: {} trainable parameters'.format(weight_count)) in_values, out_values, rest_values = apply_to_iterator( predictor.predict, val_iterator, hook=ProgressHook(val_dataset_len)) del in_values pred_probs, = out_values gt_labels, = rest_values y = np.array(list(pred_probs)) t = np.array(list(gt_labels)) acc_val_value = F.accuracy( y=y, t=t).data err_val = 1.0 - acc_val_value if extended_log: logging.info('Test: err={err:.4f} ({err})'.format( err=err_val)) else: logging.info('Test: err={err:.4f}'.format( err=err_val)) logging.info('Time cost: {:.4f} sec'.format( time.time() - tic)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) global_config.train = False num_gpus = args.num_gpus if num_gpus > 0: cuda.get_device(0).use() net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_gpus=(num_gpus > 0)) val_iterator, val_dataset_len = get_val_data_iterator( dataset_name=args.dataset, batch_size=args.batch_size, num_workers=args.num_workers) assert (args.use_pretrained or args.resume.strip()) test( net=net, val_iterator=val_iterator, val_dataset_len=val_dataset_len, num_gpus=num_gpus, calc_weight_count=True, extended_log=True) if __name__ == '__main__': main()
4,640
25.52
85
py
imgclsmob
imgclsmob-master/other/eval_pt_seg-.py
import argparse import time import logging from common.logger_utils import initialize_logging from pytorch.model_stats import measure_model from pytorch.seg_utils import add_dataset_parser_arguments, get_test_data_loader, get_metainfo, validate1 from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count from pytorch.metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric def parse_args(): parser = argparse.ArgumentParser( description='Evaluate a model for image segmentation (PyTorch/VOC2012/ADE20K/Cityscapes/COCO)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="VOC", help='dataset name. options are VOC, ADE20K, Cityscapes, COCO') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from github.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--calc-flops', dest='calc_flops', action='store_true', help='calculate FLOPs') parser.add_argument( '--calc-flops-only', dest='calc_flops_only', action='store_true', help='calculate FLOPs without quality estimation') parser.add_argument( '--remove-module', action='store_true', help='enable if stored model has module') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', type=str, 
default='torch, torchvision', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='', help='list of pip packages for logging') args = parser.parse_args() return args def test(net, test_data, use_cuda, input_image_size, in_channels, num_classes, calc_weight_count=False, calc_flops=False, calc_flops_only=True, extended_log=False, dataset_metainfo=None): assert (dataset_metainfo is not None) if not calc_flops_only: metric = [] pix_acc_macro_average = False metric.append(PixelAccuracyMetric( vague_idx=dataset_metainfo["vague_idx"], use_vague=dataset_metainfo["use_vague"], macro_average=pix_acc_macro_average)) mean_iou_macro_average = False metric.append(MeanIoUMetric( num_classes=num_classes, vague_idx=dataset_metainfo["vague_idx"], use_vague=dataset_metainfo["use_vague"], bg_idx=dataset_metainfo["background_idx"], ignore_bg=dataset_metainfo["ignore_bg"], macro_average=mean_iou_macro_average)) tic = time.time() accuracy_info = validate1( accuracy_metrics=metric, net=net, val_data=test_data, use_cuda=use_cuda) pix_acc = accuracy_info[0][1] mean_iou = accuracy_info[1][1] pix_macro = "macro" if pix_acc_macro_average else "micro" iou_macro = "macro" if mean_iou_macro_average else "micro" if extended_log: logging.info( "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), " "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) else: logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) logging.info('Time cost: {:.4f} sec'.format( time.time() - tic)) if calc_weight_count: weight_count = calc_net_weight_count(net) if not calc_flops: logging.info('Model: {} trainable parameters'.format(weight_count)) if calc_flops: num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size) assert (not 
calc_weight_count) or (weight_count == num_params) stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \ " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)" logging.info(stat_msg.format( params=num_params, params_m=num_params / 1e6, flops=num_flops, flops_m=num_flops / 1e6, flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6, macs=num_macs, macs_m=num_macs / 1e6)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) use_cuda, batch_size = prepare_pt_context( num_gpus=args.num_gpus, batch_size=1) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_cuda=use_cuda, net_extra_kwargs={"aux": False, "fixed_size": False}, load_ignore_extra=True, remove_module=args.remove_module) if hasattr(net, 'module'): input_image_size = net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size else: input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size test_data = get_test_data_loader( dataset_name=args.dataset, dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers) assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only) test( net=net, test_data=test_data, use_cuda=use_cuda, # calc_weight_count=(not log_file_exist), input_image_size=(input_image_size, input_image_size), in_channels=args.in_channels, num_classes=args.num_classes, calc_weight_count=True, calc_flops=args.calc_flops, calc_flops_only=args.calc_flops_only, extended_log=True, dataset_metainfo=get_metainfo(args.dataset)) if __name__ == '__main__': main()
7,218
33.706731
111
py
imgclsmob
imgclsmob-master/other/eval_ch_seg-.py
import argparse import time import logging from chainer import cuda, global_config from chainer import iterators from chainercv.utils import apply_to_iterator from chainercv.utils import ProgressHook from common.logger_utils import initialize_logging from chainer_.utils import prepare_model from chainer_.seg_utils1 import add_dataset_parser_arguments from chainer_.seg_utils1 import get_test_dataset from chainer_.seg_utils1 import SegPredictor from chainer_.seg_utils1 import get_metainfo from chainer_.metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric def parse_args(): parser = argparse.ArgumentParser( description='Evaluate a model for image segmentation (Chainer/VOC2012/ADE20K/Cityscapes/COCO)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="VOC", help='dataset name. options are VOC, ADE20K, Cityscapes, COCO') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) parser.add_argument( '--model', type=str, required=True, help='type of model to use. 
see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', type=str, default='chainer, chainercv', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='cupy-cuda92, cupy-cuda100, chainer, chainercv', help='list of pip packages for logging') args = parser.parse_args() return args def test(net, test_dataset, num_gpus, num_classes, calc_weight_count=False, extended_log=False, dataset_metainfo=None): assert (dataset_metainfo is not None) tic = time.time() it = iterators.SerialIterator( dataset=test_dataset, batch_size=1, repeat=False, shuffle=False) predictor = SegPredictor(base_model=net) if num_gpus > 0: predictor.to_gpu() if calc_weight_count: weight_count = net.count_params() logging.info('Model: {} trainable parameters'.format(weight_count)) in_values, out_values, rest_values = apply_to_iterator( predictor.predict, it, hook=ProgressHook(len(test_dataset))) del in_values pred_labels, = out_values gt_labels, = rest_values metrics = [] pix_acc_macro_average = False metrics.append(PixelAccuracyMetric( vague_idx=dataset_metainfo["vague_idx"], use_vague=dataset_metainfo["use_vague"], macro_average=pix_acc_macro_average)) mean_iou_macro_average = False metrics.append(MeanIoUMetric( num_classes=num_classes, vague_idx=dataset_metainfo["vague_idx"], 
use_vague=dataset_metainfo["use_vague"], bg_idx=dataset_metainfo["background_idx"], ignore_bg=dataset_metainfo["ignore_bg"], macro_average=mean_iou_macro_average)) labels = iter(gt_labels) preds = iter(pred_labels) for label, pred in zip(labels, preds): for metric in metrics: metric.update(label, pred) accuracy_info = [metric.get() for metric in metrics] pix_acc = accuracy_info[0][1] mean_iou = accuracy_info[1][1] pix_macro = "macro" if pix_acc_macro_average else "micro" iou_macro = "macro" if mean_iou_macro_average else "micro" if extended_log: logging.info( "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), " "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) else: logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) logging.info('Time cost: {:.4f} sec'.format( time.time() - tic)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) global_config.train = False num_gpus = args.num_gpus if num_gpus > 0: cuda.get_device(0).use() net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), net_extra_kwargs={"aux": False, "fixed_size": False}, use_gpus=(num_gpus > 0)) test_dataset = get_test_dataset( dataset_name=args.dataset, dataset_dir=args.data_dir) assert (args.use_pretrained or args.resume.strip()) test( net=net, test_dataset=test_dataset, num_gpus=num_gpus, num_classes=args.num_classes, calc_weight_count=True, extended_log=True, dataset_metainfo=get_metainfo(args.dataset)) if __name__ == '__main__': main()
5,996
28.835821
107
py
imgclsmob
imgclsmob-master/other/eval_gl_mch.py
""" Script for evaluating trained image matching model on MXNet/Gluon (under development). """ import os import time import logging import argparse import numpy as np import mxnet as mx from mxnet.gluon.utils import split_and_load from common.logger_utils import initialize_logging from gluon.utils import prepare_mx_context, prepare_model from gluon.dataset_utils import get_dataset_metainfo from gluon.dataset_utils import get_val_data_source def add_eval_parser_arguments(parser): """ Create python script parameters (for eval specific subpart). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. """ parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--use-pretrained", action="store_true", help="enable using pretrained model from github repo") parser.add_argument( "--dtype", type=str, default="float32", help="base data type for tensors") parser.add_argument( "--resume", type=str, default="", help="resume from previously saved parameters") parser.add_argument( "--calc-flops", dest="calc_flops", action="store_true", help="calculate FLOPs") parser.add_argument( "--calc-flops-only", dest="calc_flops_only", action="store_true", help="calculate FLOPs without quality estimation") parser.add_argument( "--data-subset", type=str, default="val", help="data subset. 
options are val and test") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "-j", "--num-data-workers", dest="num_workers", default=4, type=int, help="number of preprocessing workers") parser.add_argument( "--batch-size", type=int, default=512, help="training batch size per device (CPU/GPU)") parser.add_argument( "--save-dir", type=str, default="", help="directory of saved models and log-files") parser.add_argument( "--logging-file-name", type=str, default="train.log", help="filename of training log") parser.add_argument( "--log-packages", type=str, default="mxnet, numpy", help="list of python packages for logging") parser.add_argument( "--log-pip-packages", type=str, default="mxnet-cu100", help="list of pip packages for logging") parser.add_argument( "--disable-cudnn-autotune", action="store_true", help="disable cudnn autotune for segmentation models") parser.add_argument( "--show-progress", action="store_true", help="show progress bar") def parse_args(): """ Parse python script parameters (common part). Returns: ------- ArgumentParser Resulted args. 
""" parser = argparse.ArgumentParser( description="Evaluate a model for image matching (Gluon/HPatches)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--dataset", type=str, default="HPatches", help="dataset name") parser.add_argument( "--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"), help="path to working directory only for dataset root path preset") args, _ = parser.parse_known_args() dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset) dataset_metainfo.add_dataset_parser_arguments( parser=parser, work_dir_path=args.work_dir) add_eval_parser_arguments(parser) args = parser.parse_args() return args def warp_keypoints(keypoints, H): num_points = keypoints.shape[0] homogeneous_points = np.concatenate([keypoints, np.ones((num_points, 1))], axis=1) warped_points = np.dot(homogeneous_points, np.transpose(H)).squeeze(axis=2) return warped_points[:, :2] / warped_points[:, 2:] def keep_true_keypoints(points, H, shape): warped_points = warp_keypoints(points[:, [1, 0]], H) warped_points[:, [0, 1]] = warped_points[:, [1, 0]] mask = (warped_points[:, 0] >= 0) & (warped_points[:, 0] < shape[0]) &\ (warped_points[:, 1] >= 0) & (warped_points[:, 1] < shape[1]) return points[mask, :] def filter_keypoints(points, shape): mask = (points[:, 0] >= 0) & (points[:, 0] < shape[0]) &\ (points[:, 1] >= 0) & (points[:, 1] < shape[1]) return points[mask, :] def select_k_best(conf_pts, max_count=300): sorted_pts = conf_pts[conf_pts[:, 2].argsort(), :2] start = min(max_count, conf_pts.shape[0]) return sorted_pts[-start:, :] def calc_repeatability_np(src_pts, src_confs, dst_conf_pts, homography, src_shape, dst_shape): distance_thresh = 3 filtered_warped_keypoints = keep_true_keypoints(dst_conf_pts, np.linalg.inv(homography), src_shape) true_warped_keypoints = warp_keypoints(src_pts[:, [1, 0]], homography) true_warped_keypoints = np.stack([true_warped_keypoints[:, 1], true_warped_keypoints[:, 0], src_confs], axis=-1) 
true_warped_keypoints = filter_keypoints(true_warped_keypoints, dst_shape) filtered_warped_keypoints = select_k_best(filtered_warped_keypoints) true_warped_keypoints = select_k_best(true_warped_keypoints) n1 = true_warped_keypoints.shape[0] n2 = filtered_warped_keypoints.shape[0] true_warped_keypoints = np.expand_dims(true_warped_keypoints, 1) filtered_warped_keypoints = np.expand_dims(filtered_warped_keypoints, 0) norm = np.linalg.norm(true_warped_keypoints - filtered_warped_keypoints, ord=None, axis=2) count1 = 0 count2 = 0 if n2 != 0: min1 = np.min(norm, axis=1) count1 = np.sum(min1 <= distance_thresh) if n1 != 0: min2 = np.min(norm, axis=0) count2 = np.sum(min2 <= distance_thresh) if n1 + n2 > 0: repeatability = (count1 + count2) / (n1 + n2) else: repeatability = 0 return n1, n2, repeatability def batch_fn(batch, ctx): data_src = split_and_load(batch[0], ctx_list=ctx, batch_axis=0) data_dst = split_and_load(batch[1], ctx_list=ctx, batch_axis=0) label = split_and_load(batch[2], ctx_list=ctx, batch_axis=0) return data_src, data_dst, label def calc_detector_repeatability(test_data, net, ctx): tic = time.time() repeatabilities = [] n1s = [] n2s = [] for batch in test_data: data_src_list, data_dst_list, labels_list = batch_fn(batch, ctx) outputs_src_list = [net(X) for X in data_src_list] outputs_dst_list = [net(X) for X in data_dst_list] for i in range(len(data_src_list)): homography = labels_list[i].asnumpy() data_src_i = data_src_list[i] data_dst_i = data_dst_list[i] src_shape = data_src_i.shape[2:] dst_shape = data_dst_i.shape[2:] src_pts, src_confs, src_desc_map = outputs_src_list[i] dst_pts, dst_confs, dst_desc_map = outputs_dst_list[i] # src_conf_pts = mx.nd.concat(src_pts[0], src_confs[0].reshape(shape=(-1, 1)), dim=1).asnumpy() src_pts_np = src_pts[0].asnumpy() src_confs_np = src_confs[0].asnumpy() dst_conf_pts = mx.nd.concat(dst_pts[0], dst_confs[0].reshape(shape=(-1, 1)), dim=1).asnumpy() n1, n2, repeatability = calc_repeatability_np( src_pts_np, 
src_confs_np, dst_conf_pts, homography, src_shape, dst_shape) n1s.append(n1) n2s.append(n2) repeatabilities.append(repeatability) logging.info("Average number of points in the first image: {}".format(np.mean(n1s))) logging.info("Average number of points in the second image: {}".format(np.mean(n2s))) logging.info("The repeatability: {:.4f}".format(np.mean(repeatabilities))) logging.info("Time cost: {:.4f} sec".format(time.time() - tic)) def main(): """ Main body of script. """ args = parse_args() os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0" assert (args.batch_size == 1) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset) ds_metainfo.update(args=args) ctx, batch_size = prepare_mx_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), dtype=args.dtype, net_extra_kwargs=ds_metainfo.test_net_extra_kwargs, load_ignore_extra=False, classes=args.classes, in_channels=args.in_channels, do_hybridize=False, ctx=ctx) test_data = get_val_data_source( ds_metainfo=ds_metainfo, batch_size=args.batch_size, num_workers=args.num_workers) calc_detector_repeatability( test_data=test_data, net=net, ctx=ctx) if __name__ == "__main__": main()
9,800
30.213376
116
py
imgclsmob
imgclsmob-master/other/__init__.py
0
0
0
py
imgclsmob
imgclsmob-master/other/eval_gl_seg-.py
import os import argparse import time import logging import mxnet as mx from common.logger_utils import initialize_logging from gluon.utils import prepare_mx_context, prepare_model, calc_net_weight_count from gluon.model_stats import measure_model from gluon.seg_utils1 import add_dataset_parser_arguments, get_metainfo from gluon.seg_utils1 import batch_fn from gluon.seg_utils1 import get_test_data_source from gluon.seg_utils1 import validate1 from gluon.metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric def add_eval_seg_parser_arguments(parser): parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--dtype', type=str, default='float32', help='data type for training. default is float32') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--calc-flops', dest='calc_flops', action='store_true', help='calculate FLOPs') parser.add_argument( '--calc-flops-only', dest='calc_flops_only', action='store_true', help='calculate FLOPs without quality estimation') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', type=str, default='mxnet', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='mxnet-cu92, mxnet-cu100mkl, gluoncv', help='list of pip packages for logging') def parse_args(): parser = argparse.ArgumentParser( 
description='Evaluate a model for image segmentation (Gluon/VOC2012/ADE20K/Cityscapes/COCO)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="VOC", help='dataset name. options are VOC, ADE20K, Cityscapes, COCO') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) add_eval_seg_parser_arguments(parser) args = parser.parse_args() return args def test(net, test_data, data_source_needs_reset, dtype, ctx, input_image_size, in_channels, classes, calc_weight_count=False, calc_flops=False, calc_flops_only=True, extended_log=False, dataset_metainfo=None): assert (dataset_metainfo is not None) if not calc_flops_only: metric = mx.metric.CompositeEvalMetric() pix_acc_macro_average = False metric.add(PixelAccuracyMetric( vague_idx=dataset_metainfo["vague_idx"], use_vague=dataset_metainfo["use_vague"], macro_average=pix_acc_macro_average)) mean_iou_macro_average = False metric.add(MeanIoUMetric( num_classes=classes, vague_idx=dataset_metainfo["vague_idx"], use_vague=dataset_metainfo["use_vague"], bg_idx=dataset_metainfo["background_idx"], ignore_bg=dataset_metainfo["ignore_bg"], macro_average=mean_iou_macro_average)) tic = time.time() accuracy_info = validate1( accuracy_metric=metric, net=net, val_data=test_data, batch_fn=batch_fn, data_source_needs_reset=data_source_needs_reset, dtype=dtype, ctx=ctx) pix_acc = accuracy_info[1][0] mean_iou = accuracy_info[1][1] pix_macro = "macro" if pix_acc_macro_average else "micro" iou_macro = "macro" if mean_iou_macro_average else "micro" if extended_log: logging.info( "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), " "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) else: logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format( pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou)) 
logging.info("Time cost: {:.4f} sec".format( time.time() - tic)) if calc_weight_count: weight_count = calc_net_weight_count(net) if not calc_flops: logging.info("Model: {} trainable parameters".format(weight_count)) if calc_flops: num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size, ctx[0]) assert (not calc_weight_count) or (weight_count == num_params) stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \ " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)" logging.info(stat_msg.format( params=num_params, params_m=num_params / 1e6, flops=num_flops, flops_m=num_flops / 1e6, flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6, macs=num_macs, macs_m=num_macs / 1e6)) def main(): os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0" args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) ctx, batch_size = prepare_mx_context( num_gpus=args.num_gpus, batch_size=1) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), dtype=args.dtype, net_extra_kwargs={"aux": False, "fixed_size": False}, load_ignore_extra=True, classes=args.num_classes, in_channels=args.in_channels, do_hybridize=False, ctx=ctx) input_image_size = net.in_size if hasattr(net, 'in_size') else (480, 480) test_data = get_test_data_source( dataset_name=args.dataset, dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers) assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only) test( net=net, test_data=test_data, data_source_needs_reset=False, dtype=args.dtype, ctx=ctx, input_image_size=input_image_size, in_channels=args.in_channels, classes=args.num_classes, # calc_weight_count=(not log_file_exist), calc_weight_count=True, calc_flops=args.calc_flops, 
calc_flops_only=args.calc_flops_only, extended_log=True, dataset_metainfo=get_metainfo(args.dataset)) if __name__ == '__main__': main()
7,626
32.897778
111
py
imgclsmob
imgclsmob-master/other/eval_pt_cifar-.py
import argparse import time import logging from common.logger_utils import initialize_logging from pytorch.model_stats import measure_model from pytorch.cifar1 import add_dataset_parser_arguments, get_val_data_loader from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count, validate1, AverageMeter def parse_args(): parser = argparse.ArgumentParser( description='Evaluate a model for image classification (PyTorch/CIFAR/SVHN)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--dataset', type=str, default="CIFAR10", help='dataset name. options are CIFAR10, CIFAR100, and SVHN') args, _ = parser.parse_known_args() add_dataset_parser_arguments(parser, args.dataset) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from github.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--calc-flops', dest='calc_flops', action='store_true', help='calculate FLOPs') parser.add_argument( '--calc-flops-only', dest='calc_flops_only', action='store_true', help='calculate FLOPs without quality estimation') parser.add_argument( '--remove-module', action='store_true', help='enable if stored model has module') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=512, help='training batch size per device (CPU/GPU).') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', 
type=str, default='torch, torchvision', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='', help='list of pip packages for logging') args = parser.parse_args() return args def test(net, val_data, use_cuda, input_image_size, in_channels, calc_weight_count=False, calc_flops=False, calc_flops_only=True, extended_log=False): if not calc_flops_only: accuracy_metric = AverageMeter() tic = time.time() err_val = validate1( accuracy_metric=accuracy_metric, net=net, val_data=val_data, use_cuda=use_cuda) if extended_log: logging.info('Test: err={err:.4f} ({err})'.format( err=err_val)) else: logging.info('Test: err={err:.4f}'.format( err=err_val)) logging.info('Time cost: {:.4f} sec'.format( time.time() - tic)) if calc_weight_count: weight_count = calc_net_weight_count(net) if not calc_flops: logging.info('Model: {} trainable parameters'.format(weight_count)) if calc_flops: num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size) assert (not calc_weight_count) or (weight_count == num_params) stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \ " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)" logging.info(stat_msg.format( params=num_params, params_m=num_params / 1e6, flops=num_flops, flops_m=num_flops / 1e6, flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6, macs=num_macs, macs_m=num_macs / 1e6)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) use_cuda, batch_size = prepare_pt_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_cuda=use_cuda, remove_module=args.remove_module) if hasattr(net, 'module'): input_image_size = 
net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size else: input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size val_data = get_val_data_loader( dataset_name=args.dataset, dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers) assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only) test( net=net, val_data=val_data, use_cuda=use_cuda, # calc_weight_count=(not log_file_exist), input_image_size=(input_image_size, input_image_size), in_channels=args.in_channels, calc_weight_count=True, calc_flops=args.calc_flops, calc_flops_only=args.calc_flops_only, extended_log=True) if __name__ == '__main__': main()
5,894
30.524064
107
py
imgclsmob
imgclsmob-master/other/eval_ch_in1k-.py
import math import time import logging import argparse import numpy as np from chainer import cuda, global_config import chainer.functions as F from chainercv.utils import apply_to_iterator from chainercv.utils import ProgressHook from common.logger_utils import initialize_logging from chainer_.top_k_accuracy1 import top_k_accuracy from chainer_.utils import prepare_model from chainer_.imagenet1k1 import add_dataset_parser_arguments from chainer_.imagenet1k1 import get_val_data_iterator from chainer_.imagenet1k1 import ImagenetPredictor def add_eval_parser_arguments(parser): parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--use-pretrained", action="store_true", help="enable using pretrained model from github repo") parser.add_argument( "--resume", type=str, default="", help="resume from previously saved parameters") parser.add_argument( "--data-subset", type=str, default="val", help="data subset. 
options are val and test") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "-j", "--num-data-workers", dest="num_workers", default=4, type=int, help="number of preprocessing workers") parser.add_argument( "--batch-size", type=int, default=512, help="training batch size per device (CPU/GPU)") parser.add_argument( "--save-dir", type=str, default="", help="directory of saved models and log-files") parser.add_argument( "--logging-file-name", type=str, default="train.log", help="filename of training log") parser.add_argument( "--log-packages", type=str, default="chainer, chainercv", help="list of python packages for logging") parser.add_argument( "--log-pip-packages", type=str, default="cupy-cuda100, chainer, chainercv", help="list of pip packages for logging") parser.add_argument( "--disable-cudnn-autotune", action="store_true", help="disable cudnn autotune for segmentation models") parser.add_argument( "--show-progress", action="store_true", help="show progress bar") def parse_args(): parser = argparse.ArgumentParser( description="Evaluate a model for image classification/segmentation (Chainer)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_dataset_parser_arguments(parser) add_eval_parser_arguments(parser) args = parser.parse_args() return args def test(net, val_iterator, val_dataset_len, num_gpus, input_image_size=224, resize_inv_factor=0.875, calc_weight_count=False, extended_log=False): assert (resize_inv_factor > 0.0) resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor)) tic = time.time() predictor = ImagenetPredictor( base_model=net, scale_size=resize_value, crop_size=input_image_size) if num_gpus > 0: predictor.to_gpu() if calc_weight_count: weight_count = net.count_params() logging.info("Model: {} trainable parameters".format(weight_count)) in_values, out_values, rest_values = apply_to_iterator( predictor.predict, val_iterator, hook=ProgressHook(val_dataset_len)) del 
in_values pred_probs, = out_values gt_labels, = rest_values y = np.array(list(pred_probs)) t = np.array(list(gt_labels)) top1_acc = F.accuracy( y=y, t=t).data top5_acc = top_k_accuracy( y=y, t=t, k=5).data err_top1_val = 1.0 - top1_acc err_top5_val = 1.0 - top5_acc if extended_log: logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format( top1=err_top1_val, top5=err_top5_val)) else: logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format( top1=err_top1_val, top5=err_top5_val)) logging.info("Time cost: {:.4f} sec".format( time.time() - tic)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) global_config.train = False num_gpus = args.num_gpus if num_gpus > 0: cuda.get_device(0).use() net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_gpus=(num_gpus > 0)) num_classes = net.classes if hasattr(net, "classes") else 1000 input_image_size = net.in_size[0] if hasattr(net, "in_size") else args.input_size val_iterator, val_dataset_len = get_val_data_iterator( data_dir=args.data_dir, batch_size=args.batch_size, num_workers=args.num_workers, num_classes=num_classes) assert (args.use_pretrained or args.resume.strip()) test( net=net, val_iterator=val_iterator, val_dataset_len=val_dataset_len, num_gpus=num_gpus, input_image_size=input_image_size, resize_inv_factor=args.resize_inv_factor, calc_weight_count=True, extended_log=True) if __name__ == "__main__": main()
5,737
26.719807
95
py
imgclsmob
imgclsmob-master/other/eval_pt_mch.py
""" Script for evaluating trained image matching model on PyTorch (under development). """ import os import time import logging import argparse import numpy as np import torch from common.logger_utils import initialize_logging from pytorch.utils import prepare_pt_context, prepare_model from pytorch.dataset_utils import get_dataset_metainfo from pytorch.dataset_utils import get_val_data_source from pytorch.metrics.ret_metrics import PointDescriptionMatchRatio def add_eval_parser_arguments(parser): """ Create python script parameters (for eval specific subpart). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. """ parser.add_argument( "--model", type=str, required=True, help="type of model to use. see model_provider for options") parser.add_argument( "--use-pretrained", action="store_true", help="enable using pretrained model from github repo") parser.add_argument( "--dtype", type=str, default="float32", help="base data type for tensors") parser.add_argument( "--resume", type=str, default="", help="resume from previously saved parameters") parser.add_argument( "--calc-flops", dest="calc_flops", action="store_true", help="calculate FLOPs") parser.add_argument( "--calc-flops-only", dest="calc_flops_only", action="store_true", help="calculate FLOPs without quality estimation") parser.add_argument( "--data-subset", type=str, default="val", help="data subset. 
options are val and test") parser.add_argument( "--num-gpus", type=int, default=0, help="number of gpus to use") parser.add_argument( "-j", "--num-data-workers", dest="num_workers", default=4, type=int, help="number of preprocessing workers") parser.add_argument( "--batch-size", type=int, default=512, help="training batch size per device (CPU/GPU)") parser.add_argument( "--save-dir", type=str, default="", help="directory of saved models and log-files") parser.add_argument( "--logging-file-name", type=str, default="train.log", help="filename of training log") parser.add_argument( "--log-packages", type=str, default="mxnet, numpy", help="list of python packages for logging") parser.add_argument( "--log-pip-packages", type=str, default="mxnet-cu100", help="list of pip packages for logging") parser.add_argument( "--disable-cudnn-autotune", action="store_true", help="disable cudnn autotune for segmentation models") parser.add_argument( "--show-progress", action="store_true", help="show progress bar") def parse_args(): """ Parse python script parameters (common part). Returns: ------- ArgumentParser Resulted args. """ parser = argparse.ArgumentParser( description="Evaluate a model for image matching (PyTorch/HPatches)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--dataset", type=str, default="HPatches", help="dataset name") parser.add_argument( "--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"), help="path to working directory only for dataset root path preset") args, _ = parser.parse_known_args() dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset) dataset_metainfo.add_dataset_parser_arguments( parser=parser, work_dir_path=args.work_dir) add_eval_parser_arguments(parser) args = parser.parse_args() return args class SuperPointFrontend(object): """ Wrapper around pytorch net to help with pre and post image processing. 
""" def __init__(self, nms_dist=4, conf_thresh=0.015, nn_thresh=0.7, cuda=True): self.nms_dist = nms_dist self.conf_thresh = conf_thresh self.nn_thresh = nn_thresh # L2 descriptor distance for good match. self.cell = 8 # Size of each output cell. Keep this fixed. self.border_remove = 4 # Remove points this close to the border. def nms_fast(self, in_corners, H, W, dist_thresh): """ Run a faster approximate Non-Max-Suppression on numpy corners shaped: 3xN [x_i,y_i,conf_i]^T Algo summary: Create a grid sized HxW. Assign each corner location a 1, rest are zeros. Iterate through all the 1's and convert them either to -1 or 0. Suppress points by setting nearby values to 0. Grid Value Legend: -1 : Kept. 0 : Empty or suppressed. 1 : To be processed (converted to either kept or supressed). NOTE: The NMS first rounds points to integers, so NMS distance might not be exactly dist_thresh. It also assumes points are within image boundaries. Inputs in_corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T. H - Image height. W - Image width. dist_thresh - Distance to suppress, measured as an infinty norm distance. Returns: nmsed_corners - 3xN numpy matrix with surviving corners. nmsed_inds - N length numpy vector with surviving corner indices. """ grid = np.zeros((H, W)).astype(int) # Track NMS data. inds = np.zeros((H, W)).astype(int) # Store indices of points. # Sort by confidence and round to nearest int. inds1 = np.argsort(-in_corners[2, :]) corners = in_corners[:, inds1] rcorners = corners[:2, :].round().astype(int) # Rounded corners. # Check for edge case of 0 or 1 corners. if rcorners.shape[1] == 0: return np.zeros((3, 0)).astype(int), np.zeros(0).astype(int) if rcorners.shape[1] == 1: out = np.vstack((rcorners, in_corners[2])).reshape(3, 1) return out, np.zeros((1)).astype(int) # Initialize the grid. 
for i, rc in enumerate(rcorners.T): grid[rcorners[1, i], rcorners[0, i]] = 1 inds[rcorners[1, i], rcorners[0, i]] = i # Pad the border of the grid, so that we can NMS points near the border. pad = dist_thresh grid = np.pad(grid, ((pad, pad), (pad, pad)), mode='constant') # Iterate through points, highest to lowest conf, suppress neighborhood. count = 0 for i, rc in enumerate(rcorners.T): # Account for top and left padding. pt = (rc[0] + pad, rc[1] + pad) if grid[pt[1], pt[0]] == 1: # If not yet suppressed. grid[pt[1] - pad:pt[1] + pad + 1, pt[0] - pad:pt[0] + pad + 1] = 0 grid[pt[1], pt[0]] = -1 count += 1 # Get all surviving -1's and return sorted array of remaining corners. keepy, keepx = np.where(grid == -1) keepy, keepx = keepy - pad, keepx - pad inds_keep = inds[keepy, keepx] out = corners[:, inds_keep] values = out[-1, :] inds2 = np.argsort(-values) out = out[:, inds2] out_inds = inds1[inds_keep[inds2]] return out, out_inds def run(self, net, img): """ Process a numpy image to extract points and descriptors. Input img - HxW numpy float32 input image in range [0,1]. Output corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T. desc - 256xN numpy array of corresponding unit normalized descriptors. heatmap - HxW numpy heatmap in range [0,1] of point confidences. """ import torch.nn as nn # assert img.ndim == 2, 'Image must be grayscale.' # assert img.dtype == np.float32, 'Image must be float32.' # H, W = img.shape[0], img.shape[1] # in_channels = img.copy() # in_channels = (in_channels.reshape(1, H, W)) # in_channels = torch.from_numpy(in_channels) # in_channels = torch.autograd.Variable(in_channels).view(1, 1, H, W) # if self.cuda: # in_channels = in_channels.cuda() inp = img H, W = img.shape[2], img.shape[3] # Forward pass of network. outs = net.forward(inp) semi, coarse_desc = outs[0], outs[1] # Convert pytorch -> numpy. semi = semi.data.cpu().numpy().squeeze() # --- Process points. dense = np.exp(semi) # Softmax. 
dense = dense / (np.sum(dense, axis=0) + .00001) # Should sum to 1. # Remove dustbin. nodust = dense[:-1, :, :] # Reshape to get full resolution heatmap. Hc = int(H / self.cell) Wc = int(W / self.cell) nodust = nodust.transpose(1, 2, 0) heatmap = np.reshape(nodust, [Hc, Wc, self.cell, self.cell]) heatmap = np.transpose(heatmap, [0, 2, 1, 3]) heatmap = np.reshape(heatmap, [Hc * self.cell, Wc * self.cell]) xs, ys = np.where(heatmap >= self.conf_thresh) # Confidence threshold. if len(xs) == 0: return np.zeros((3, 0)), None, None pts = np.zeros((3, len(xs))) # Populate point data sized 3xN. pts[0, :] = ys pts[1, :] = xs pts[2, :] = heatmap[xs, ys] pts, _ = self.nms_fast(pts, H, W, dist_thresh=self.nms_dist) # Apply NMS. inds = np.argsort(pts[2, :]) pts = pts[:, inds[::-1]] # Sort by confidence. # Remove points along border. bord = self.border_remove toremoveW = np.logical_or(pts[0, :] < bord, pts[0, :] >= (W - bord)) toremoveH = np.logical_or(pts[1, :] < bord, pts[1, :] >= (H - bord)) toremove = np.logical_or(toremoveW, toremoveH) pts = pts[:, ~toremove] # --- Process descriptor. D = coarse_desc.shape[1] if pts.shape[1] == 0: desc = np.zeros((D, 0)) else: # Interpolate into descriptor map using 2D point locations. samp_pts = torch.from_numpy(pts[:2, :].copy()) samp_pts[0, :] = (samp_pts[0, :] / (float(W) / 2.)) - 1. samp_pts[1, :] = (samp_pts[1, :] / (float(H) / 2.)) - 1. 
samp_pts = samp_pts.transpose(0, 1).contiguous() samp_pts = samp_pts.view(1, 1, -1, 2) samp_pts = samp_pts.float() # if self.cuda: # samp_pts = samp_pts.cuda() samp_pts = samp_pts.cuda() desc = nn.functional.grid_sample(coarse_desc, samp_pts) desc = desc.data.cpu().numpy().reshape(D, -1) desc /= np.linalg.norm(desc, axis=0)[np.newaxis, :] return pts, desc, heatmap def warp_keypoints(src_pts, homography): src_hmg_pts = np.concatenate([src_pts, np.ones((src_pts.shape[0], 1))], axis=1) dst_hmg_pts = np.dot(src_hmg_pts, np.transpose(homography)).squeeze(axis=2) dst_pts = dst_hmg_pts[:, :2] / dst_hmg_pts[:, 2:] return dst_pts def calc_filter_mask(pts, shape): mask = (pts[:, 0] >= 0) & (pts[:, 0] < shape[0]) & (pts[:, 1] >= 0) & (pts[:, 1] < shape[1]) return mask def select_k_best(pts, confs, max_count=300): inds = confs.argsort()[::-1][:max_count] return pts[inds, :], confs[inds] def calc_repeatability_np(src_pts, src_confs, dst_pts, dst_confs, homography, src_shape, dst_shape, distance_thresh=3): pred_src_pts = warp_keypoints(dst_pts, np.linalg.inv(homography)) pred_src_mask = calc_filter_mask(pred_src_pts, src_shape) label_dst_pts, label_dst_confs = dst_pts[pred_src_mask, :], dst_confs[pred_src_mask] pred_dst_pts = warp_keypoints(src_pts, homography) pred_dst_mask = calc_filter_mask(pred_dst_pts, dst_shape) pred_dst_pts, pred_dst_confs = pred_dst_pts[pred_dst_mask, :], src_confs[pred_dst_mask] label_dst_pts, label_dst_confs = select_k_best(label_dst_pts, label_dst_confs) pred_dst_pts, pred_dst_confs = select_k_best(pred_dst_pts, pred_dst_confs) n_pred = pred_dst_pts.shape[0] n_label = label_dst_pts.shape[0] label_dst_pts = np.stack([label_dst_pts[:, 0], label_dst_pts[:, 1], label_dst_confs], axis=1) pred_dst_pts = np.stack([pred_dst_pts[:, 0], pred_dst_pts[:, 1], pred_dst_confs], axis=1) pred_dst_pts = np.expand_dims(pred_dst_pts, 1) label_dst_pts = np.expand_dims(label_dst_pts, 0) norm = np.linalg.norm(pred_dst_pts - label_dst_pts, ord=None, axis=2) count1 = 0 count2 
= 0 if n_label != 0: min1 = np.min(norm, axis=1) count1 = np.sum(min1 <= distance_thresh) if n_pred != 0: min2 = np.min(norm, axis=0) count2 = np.sum(min2 <= distance_thresh) if n_pred + n_label > 0: repeatability = (count1 + count2) / (n_pred + n_label) else: repeatability = 0 return n_pred, n_label, repeatability def calc_detector_repeatability(test_data, net, use_cuda): tic = time.time() repeatabilities = [] n1s = [] n2s = [] # det_metric = PointDetectionMatchRatio(pts_max_count=100) # det_metric.reset() desc_metric = PointDescriptionMatchRatio(pts_max_count=10) desc_metric.reset() with torch.no_grad(): for data_src, data_dst, target in test_data: if use_cuda: data_src = data_src.cuda(non_blocking=True) data_dst = data_dst.cuda(non_blocking=True) # spf = SuperPointFrontend() # src_pts, src_confs, src_desc_map = spf.run(net, data_src) # dst_pts, dst_confs, dst_desc_map = spf.run(net, data_dst) # src_pts = [src_pts.transpose()[:, [1, 0]].astype(np.int32)] # dst_pts = [dst_pts.transpose()[:, [1, 0]].astype(np.int32)] src_pts, src_confs, src_desc_map = net(data_src) dst_pts, dst_confs, dst_desc_map = net(data_dst) src_shape = data_src.cpu().detach().numpy().shape[2:] dst_shape = data_dst.cpu().detach().numpy().shape[2:] # print("data_src.shape={}".format(data_src.shape)) # print("data_dst.shape={}".format(data_dst.shape)) # import cv2 # scale_factor = 0.5 # num_pts = 100 # # src_img = data_src.squeeze(0).transpose(0, 2).transpose(0, 1).cpu().detach().numpy() # src_img = cv2.cvtColor(src_img, cv2.COLOR_GRAY2RGB) # for i in range(min(src_pts[0].shape[0], num_pts)): # assert (src_pts[0][i, 0] < src_shape[0]) # assert (src_pts[0][i, 1] < src_shape[1]) # # cv2.circle( # src_img, # (src_pts[0][i, 1], src_pts[0][i, 0]), # 5, # (0, 0, 255), # -1) # cv2.imshow( # winname="src_img", # mat=cv2.resize( # src=src_img, # dsize=None, # fx=scale_factor, # fy=scale_factor, # interpolation=cv2.INTER_NEAREST)) # # dst_img = data_dst.squeeze(0).transpose(0, 2).transpose(0, 
1).cpu().detach().numpy() # dst_img = cv2.cvtColor(dst_img, cv2.COLOR_GRAY2RGB) # for i in range(min(dst_pts[0].shape[0], num_pts)): # assert (dst_pts[0][i, 0] < dst_shape[0]) # assert (dst_pts[0][i, 1] < dst_shape[1]) # # cv2.circle( # dst_img, # (dst_pts[0][i, 1], dst_pts[0][i, 0]), # 5, # (0, 0, 255), # -1) # cv2.imshow( # winname="dst_img", # mat=cv2.resize( # src=dst_img, # dsize=None, # fx=scale_factor, # fy=scale_factor, # interpolation=cv2.INTER_NEAREST)) # # cv2.waitKey(0) # for i in range(len(src_pts)): # homography = target.cpu().detach().numpy() # # src_pts_np = src_pts[i].cpu().detach().numpy() # src_confs_np = src_confs[i].cpu().detach().numpy() # # dst_pts_np = dst_pts[i].cpu().detach().numpy() # dst_confs_np = dst_confs[i].cpu().detach().numpy() # # n1, n2, repeatability = calc_repeatability_np( # src_pts_np, # src_confs_np, # dst_pts_np, # dst_confs_np, # homography, # src_shape, # dst_shape) # n1s.append(n1) # n2s.append(n2) # repeatabilities.append(repeatability) # det_metric.update_alt( # homography=target[0], # src_pts=src_pts[0], # dst_pts=dst_pts[0], # src_confs=src_confs[0], # dst_confs=dst_confs[0], # src_img_size=src_shape, # dst_img_size=dst_shape) desc_metric.update_alt( homography=target[0], src_pts=src_pts[0], dst_pts=dst_pts[0], src_descs=src_desc_map[0], dst_descs=dst_desc_map[0], src_img_size=src_shape, dst_img_size=dst_shape) logging.info("Average number of points in the first image: {}".format(np.mean(n1s))) logging.info("Average number of points in the second image: {}".format(np.mean(n2s))) logging.info("The repeatability: {:.4f}".format(np.mean(repeatabilities))) logging.info("Time cost: {:.4f} sec".format(time.time() - tic)) def main(): """ Main body of script. 
""" args = parse_args() os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0" assert (args.batch_size == 1) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset) ds_metainfo.update(args=args) use_cuda, batch_size = prepare_pt_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_cuda=use_cuda, net_extra_kwargs=ds_metainfo.test_net_extra_kwargs, load_ignore_extra=False, num_classes=args.classes, in_channels=args.in_channels, remove_module=False) test_data = get_val_data_source( ds_metainfo=ds_metainfo, batch_size=args.batch_size, num_workers=args.num_workers) calc_detector_repeatability( test_data=test_data, net=net, use_cuda=use_cuda) if __name__ == "__main__": main()
19,664
35.620112
98
py
imgclsmob
imgclsmob-master/other/eval_pt_cub-.py
import argparse import time import logging from common.logger_utils import initialize_logging from pytorch.model_stats import measure_model from pytorch.cub200_2011_utils1 import add_dataset_parser_arguments, get_val_data_loader from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count, AverageMeter # from pytorch.utils import validate from pytorch.utils import validate1 def parse_args(): parser = argparse.ArgumentParser( description='Evaluate a model for image classification (PyTorch/CUB-200-2011)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_dataset_parser_arguments(parser) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from github.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--calc-flops', dest='calc_flops', action='store_true', help='calculate FLOPs') parser.add_argument( '--calc-flops-only', dest='calc_flops_only', action='store_true', help='calculate FLOPs without quality estimation') parser.add_argument( '--remove-module', action='store_true', help='enable if stored model has module') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=32, help='training batch size per device (CPU/GPU).') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--log-packages', type=str, default='torch, torchvision', help='list of python packages for logging') parser.add_argument( 
'--log-pip-packages', type=str, default='', help='list of pip packages for logging') args = parser.parse_args() return args # def test(net, # val_data, # use_cuda, # input_image_size, # in_channels, # calc_weight_count=False, # calc_flops=False, # calc_flops_only=True, # extended_log=False): # if not calc_flops_only: # acc_top1 = AverageMeter() # acc_top5 = AverageMeter() # tic = time.time() # err_top1_val, err_top5_val = validate( # acc_top1=acc_top1, # acc_top5=acc_top5, # net=net, # val_data=val_data, # use_cuda=use_cuda) # if extended_log: # logging.info('Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'.format( # top1=err_top1_val, top5=err_top5_val)) # else: # logging.info('Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'.format( # top1=err_top1_val, top5=err_top5_val)) # logging.info('Time cost: {:.4f} sec'.format( # time.time() - tic)) # # if calc_weight_count: # weight_count = calc_net_weight_count(net) # if not calc_flops: # logging.info('Model: {} trainable parameters'.format(weight_count)) # if calc_flops: # num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size) # assert (not calc_weight_count) or (weight_count == num_params) # stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \ # " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)" # logging.info(stat_msg.format( # params=num_params, params_m=num_params / 1e6, # flops=num_flops, flops_m=num_flops / 1e6, # flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6, # macs=num_macs, macs_m=num_macs / 1e6)) def test(net, val_data, use_cuda, input_image_size, in_channels, calc_weight_count=False, calc_flops=False, calc_flops_only=True, extended_log=False): if not calc_flops_only: accuracy_metric = AverageMeter() tic = time.time() err_val = validate1( accuracy_metric=accuracy_metric, net=net, val_data=val_data, use_cuda=use_cuda) if extended_log: logging.info('Test: err={err:.4f} ({err})'.format( err=err_val)) else: 
logging.info('Test: err={err:.4f}'.format( err=err_val)) logging.info('Time cost: {:.4f} sec'.format( time.time() - tic)) if calc_weight_count: weight_count = calc_net_weight_count(net) if not calc_flops: logging.info('Model: {} trainable parameters'.format(weight_count)) if calc_flops: num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size) assert (not calc_weight_count) or (weight_count == num_params) stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \ " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)" logging.info(stat_msg.format( params=num_params, params_m=num_params / 1e6, flops=num_flops, flops_m=num_flops / 1e6, flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6, macs=num_macs, macs_m=num_macs / 1e6)) def main(): args = parse_args() _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) use_cuda, batch_size = prepare_pt_context( num_gpus=args.num_gpus, batch_size=args.batch_size) net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), use_cuda=use_cuda, remove_module=args.remove_module) if hasattr(net, 'module'): input_image_size = net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size else: input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size val_data = get_val_data_loader( dataset_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers, input_image_size=input_image_size, resize_inv_factor=args.resize_inv_factor) assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only) test( net=net, val_data=val_data, use_cuda=use_cuda, # calc_weight_count=(not log_file_exist), input_image_size=(input_image_size, input_image_size), in_channels=args.in_channels, calc_weight_count=True, 
calc_flops=args.calc_flops, calc_flops_only=args.calc_flops_only, extended_log=True) if __name__ == '__main__': main()
7,660
32.748899
103
py
imgclsmob
imgclsmob-master/other/chainer_/top_k_accuracy1.py
import six from chainer.backends import cuda from chainer.function import Function from chainer.utils import type_check class TopKAccuracy(Function): def __init__(self, k=1): self.k = k def check_type_forward(self, in_types): type_check._argname(in_types, ('x', 't')) x_type, t_type = in_types type_check.expect( x_type.dtype.kind == 'f', t_type.dtype.kind == 'i' ) t_ndim = type_check.eval(t_type.ndim) type_check.expect( x_type.ndim >= t_type.ndim, x_type.shape[0] == t_type.shape[0], x_type.shape[2: t_ndim + 1] == t_type.shape[1:] ) for i in six.moves.range(t_ndim + 1, type_check.eval(x_type.ndim)): type_check.expect(x_type.shape[i] == 1) def forward(self, inputs): xp = cuda.get_array_module(*inputs) y, t = inputs argsorted_pred = xp.argsort(y)[:, -self.k:] return xp.asarray(xp.any(argsorted_pred.T == t, axis=0).mean(dtype=xp.float32)), def top_k_accuracy(y, t, k=1): return TopKAccuracy(k=k)(y, t)
1,111
26.8
88
py
imgclsmob
imgclsmob-master/other/chainer_/imagenet1k1.py
import math import os import numpy as np import chainer from chainer import iterators from chainer import Chain from chainer.dataset import DatasetMixin from chainercv.transforms import scale from chainercv.transforms import center_crop from chainercv.datasets import directory_parsing_label_names from chainercv.datasets import DirectoryParsingLabelDataset __all__ = ['add_dataset_parser_arguments', 'get_val_data_iterator', 'get_data_iterators', 'ImagenetPredictor'] def add_dataset_parser_arguments(parser): parser.add_argument( '--data-dir', type=str, default='../imgclsmob_data/imagenet', help='path to directory with ImageNet-1K dataset') parser.add_argument( '--input-size', type=int, default=224, help='size of the input for model') parser.add_argument( '--resize-inv-factor', type=float, default=0.875, help='inverted ratio for input image crop') parser.add_argument( '--num-classes', type=int, default=1000, help='number of classes') parser.add_argument( '--in-channels', type=int, default=3, help='number of input channels') class ImagenetPredictor(Chain): def __init__(self, base_model, scale_size=256, crop_size=224, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): super(ImagenetPredictor, self).__init__() self.scale_size = scale_size if isinstance(crop_size, int): crop_size = (crop_size, crop_size) self.crop_size = crop_size self.mean = np.array(mean, np.float32)[:, np.newaxis, np.newaxis] self.std = np.array(std, np.float32)[:, np.newaxis, np.newaxis] with self.init_scope(): self.model = base_model def _preprocess(self, img): img = scale(img=img, size=self.scale_size) img = center_crop(img, self.crop_size) img /= 255.0 img -= self.mean img /= self.std return img def predict(self, imgs): imgs = self.xp.asarray([self._preprocess(img) for img in imgs]) with chainer.using_config('train', False), chainer.function.no_backprop_mode(): imgs = chainer.Variable(imgs) predictions = self.model(imgs) output = chainer.backends.cuda.to_cpu(predictions.array) return output 
class PreprocessedDataset(DatasetMixin): def __init__(self, root, scale_size=256, crop_size=224, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): self.base = DirectoryParsingLabelDataset(root) self.scale_size = scale_size if isinstance(crop_size, int): crop_size = (crop_size, crop_size) self.crop_size = crop_size self.mean = np.array(mean, np.float32)[:, np.newaxis, np.newaxis] self.std = np.array(std, np.float32)[:, np.newaxis, np.newaxis] def __len__(self): return len(self.base) def _preprocess(self, img): img = scale(img=img, size=self.scale_size) img = center_crop(img, self.crop_size) img /= 255.0 img -= self.mean img /= self.std return img def get_example(self, i): image, label = self.base[i] image = self._preprocess(image) return image, label def get_val_data_iterator(data_dir, batch_size, num_workers, num_classes): val_dir_path = os.path.join(data_dir, 'val') val_dataset = DirectoryParsingLabelDataset(val_dir_path) val_dataset_len = len(val_dataset) assert(len(directory_parsing_label_names(val_dir_path)) == num_classes) val_iterator = iterators.MultiprocessIterator( dataset=val_dataset, batch_size=batch_size, repeat=False, shuffle=False, n_processes=num_workers, shared_mem=300000000) return val_iterator, val_dataset_len def get_data_iterators(data_dir, batch_size, num_workers, num_classes, input_image_size=224, resize_inv_factor=0.875): assert (resize_inv_factor > 0.0) resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor)) train_dir_path = os.path.join(data_dir, 'train') train_dataset = PreprocessedDataset( root=train_dir_path, scale_size=resize_value, crop_size=input_image_size) assert(len(directory_parsing_label_names(train_dir_path)) == num_classes) val_dir_path = os.path.join(data_dir, 'val') val_dataset = PreprocessedDataset( root=val_dir_path, scale_size=resize_value, crop_size=input_image_size) assert (len(directory_parsing_label_names(val_dir_path)) == num_classes) train_iterator = iterators.MultiprocessIterator( 
dataset=train_dataset, batch_size=batch_size, repeat=False, shuffle=True, n_processes=num_workers) val_iterator = iterators.MultiprocessIterator( dataset=val_dataset, batch_size=batch_size, repeat=False, shuffle=False, n_processes=num_workers) return train_iterator, val_iterator
5,436
29.717514
110
py
imgclsmob
imgclsmob-master/other/chainer_/train_ch_in1k.py
import argparse import numpy as np import chainer from chainer import cuda from chainer import training from chainer.training import extensions from chainer.serializers import save_npz from common.logger_utils import initialize_logging from chainer_.utils import prepare_model from chainer_.imagenet1k1 import add_dataset_parser_arguments from chainer_.imagenet1k1 import get_data_iterators def parse_args(): parser = argparse.ArgumentParser( description='Train a model for image classification (Chainer/ImageNet-1K)', formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_dataset_parser_arguments(parser) parser.add_argument( '--model', type=str, required=True, help='type of model to use. see model_provider for options.') parser.add_argument( '--use-pretrained', action='store_true', help='enable using pretrained model from gluon.') parser.add_argument( '--resume', type=str, default='', help='resume from previously saved parameters if not None') parser.add_argument( '--resume-state', type=str, default='', help='resume from previously saved optimizer state if not None') parser.add_argument( '--num-gpus', type=int, default=0, help='number of gpus to use.') parser.add_argument( '-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers') parser.add_argument( '--batch-size', type=int, default=512, help='training batch size per device (CPU/GPU).') parser.add_argument( '--num-epochs', type=int, default=120, help='number of training epochs.') parser.add_argument( '--start-epoch', type=int, default=1, help='starting epoch for resuming, default is 1 for new training') parser.add_argument( '--attempt', type=int, default=1, help='current number of training') parser.add_argument( '--optimizer-name', type=str, default='nag', help='optimizer name') parser.add_argument( '--lr', type=float, default=0.1, help='learning rate. 
default is 0.1') parser.add_argument( '--lr-mode', type=str, default='cosine', help='learning rate scheduler mode. options are step, poly and cosine') parser.add_argument( '--lr-decay', type=float, default=0.1, help='decay rate of learning rate. default is 0.1') parser.add_argument( '--lr-decay-period', type=int, default=0, help='interval for periodic learning rate decays. default is 0 to disable.') parser.add_argument( '--lr-decay-epoch', type=str, default='40,60', help='epoches at which learning rate decays. default is 40,60.') parser.add_argument( '--target-lr', type=float, default=1e-8, help='ending learning rate; default is 1e-8') parser.add_argument( '--momentum', type=float, default=0.9, help='momentum value for optimizer; default is 0.9') parser.add_argument( '--wd', type=float, default=0.0001, help='weight decay rate. default is 0.0001.') parser.add_argument( '--log-interval', type=int, default=50, help='number of batches to wait before logging.') parser.add_argument( '--save-interval', type=int, default=4, help='saving parameters epoch interval, best model will always be saved') parser.add_argument( '--save-dir', type=str, default='', help='directory of saved models and log-files') parser.add_argument( '--logging-file-name', type=str, default='train.log', help='filename of training log') parser.add_argument( '--seed', type=int, default=-1, help='Random seed to be fixed') parser.add_argument( '--log-packages', type=str, default='mxnet', help='list of python packages for logging') parser.add_argument( '--log-pip-packages', type=str, default='mxnet-cu92, cupy-cuda100, gluoncv', help='list of pip packages for logging') args = parser.parse_args() return args def init_rand(seed): if seed <= 0: seed = np.random.randint(10000) return seed def prepare_trainer(net, optimizer_name, lr, momentum, num_epochs, train_iter, val_iter, logging_dir_path, num_gpus=0): if optimizer_name == "sgd": optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum) elif 
optimizer_name == "nag": optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum) else: raise Exception('Unsupported optimizer: {}'.format(optimizer_name)) optimizer.setup(net) # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, ) devices = (0,) if num_gpus > 0 else (-1,) updater = training.updaters.StandardUpdater( iterator=train_iter, optimizer=optimizer, device=devices[0]) trainer = training.Trainer( updater=updater, stop_trigger=(num_epochs, 'epoch'), out=logging_dir_path) val_interval = 100000, 'iteration' log_interval = 1000, 'iteration' trainer.extend( extension=extensions.Evaluator( val_iter, net, device=devices[0]), trigger=val_interval) trainer.extend(extensions.dump_graph('main/loss')) trainer.extend(extensions.snapshot(), trigger=val_interval) trainer.extend( extensions.snapshot_object( net, 'model_iter_{.updater.iteration}'), trigger=val_interval) trainer.extend(extensions.LogReport(trigger=log_interval)) trainer.extend(extensions.observe_lr(), trigger=log_interval) trainer.extend( extensions.PrintReport([ 'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'lr']), trigger=log_interval) trainer.extend(extensions.ProgressBar(update_interval=10)) return trainer def save_params(file_stem, net, trainer): save_npz( file=file_stem + '.npz', obj=net) save_npz( file=file_stem + '.states', obj=trainer) def main(): args = parse_args() args.seed = init_rand(seed=args.seed) _, log_file_exist = initialize_logging( logging_dir_path=args.save_dir, logging_file_name=args.logging_file_name, script_args=args, log_packages=args.log_packages, log_pip_packages=args.log_pip_packages) num_gpus = args.num_gpus if num_gpus > 0: cuda.get_device(0).use() batch_size = args.batch_size net = prepare_model( model_name=args.model, use_pretrained=args.use_pretrained, pretrained_model_file_path=args.resume.strip(), num_gpus=num_gpus) num_classes = net.classes if hasattr(net, 'classes') else 1000 input_image_size = 
net.in_size[0] if hasattr(net, 'in_size') else args.input_size train_iter, val_iter = get_data_iterators( data_dir=args.data_dir, batch_size=batch_size, num_workers=args.num_workers, num_classes=num_classes, input_image_size=input_image_size, resize_inv_factor=args.resize_inv_factor) trainer = prepare_trainer( net=net, optimizer_name=args.optimizer_name, lr=args.lr, momentum=args.momentum, num_epochs=args.num_epochs, train_iter=train_iter, val_iter=val_iter, logging_dir_path=args.save_dir, num_gpus=num_gpus) # if args.save_dir and args.save_interval: # lp_saver = TrainLogParamSaver( # checkpoint_file_name_prefix='imagenet_{}'.format(args.model), # last_checkpoint_file_name_suffix="last", # best_checkpoint_file_name_suffix=None, # last_checkpoint_dir_path=args.save_dir, # best_checkpoint_dir_path=None, # last_checkpoint_file_count=2, # best_checkpoint_file_count=2, # checkpoint_file_save_callback=save_params, # checkpoint_file_exts=['.npz', '.states'], # save_interval=args.save_interval, # num_epochs=args.num_epochs, # param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'], # acc_ind=2, # # bigger=[True], # # mask=None, # score_log_file_path=os.path.join(args.save_dir, 'score.log'), # score_log_attempt_value=args.attempt, # best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log')) # else: # lp_saver = None trainer.run() if __name__ == '__main__': main()
9,308
29.224026
115
py