repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnesta.py | """
ResNeSt(A) with average downsampling for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
"""
__all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnesta50', 'resnesta101', 'resnesta152',
'resnesta200', 'resnesta269', 'ResNeStADownBlock']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, saconv3x3_block, AvgPool2d, SimpleSequential, is_channels_first
from .senet import SEInitBlock
class ResNeStABlock(nn.Layer):
    """
    Simple ResNeSt(A) block for residual path in ResNeSt(A) unit.

    Structure: conv3x3 -> (optional AvgPool downsampling) -> split-attention conv3x3.
    The 'A' variant performs spatial downsampling with average pooling instead of a
    strided convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStABlock, self).__init__(**kwargs)
        # When strides > 1, downsampling is delegated to an AvgPool2d layer,
        # so both convolutions stay stride-1.
        self.resize = (strides > 1)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv1")
        if self.resize:
            self.pool = AvgPool2d(
                pool_size=3,
                strides=strides,
                padding=1,
                data_format=data_format,
                name="pool")
        # Final activation is omitted (activation=None): the enclosing unit
        # applies ReLU after the residual addition.
        self.conv2 = saconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            activation=None,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        if self.resize:
            x = self.pool(x)
        x = self.conv2(x, training=training)
        return x
class ResNeStABottleneck(nn.Layer):
    """
    ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit.

    Structure: conv1x1 (reduce) -> split-attention conv3x3 -> (optional AvgPool
    downsampling) -> conv1x1 (expand). The 'A' variant downsamples with average
    pooling placed after the 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStABottleneck, self).__init__(**kwargs)
        # When strides > 1, downsampling is delegated to an AvgPool2d layer.
        self.resize = (strides > 1)
        # Internal width of the bottleneck.
        mid_channels = out_channels // bottleneck_factor
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = saconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        if self.resize:
            self.pool = AvgPool2d(
                pool_size=3,
                strides=strides,
                padding=1,
                data_format=data_format,
                name="pool")
        # No activation here: the enclosing unit applies ReLU after the
        # residual addition.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.resize:
            x = self.pool(x)
        x = self.conv3(x, training=training)
        return x
class ResNeStADownBlock(nn.Layer):
    """
    ResNeSt(A) downsample block for the identity branch of a residual unit.

    Downsamples spatially with average pooling (ceil_mode=True so odd input sizes
    round up) and then matches the channel count with a 1x1 convolution without
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStADownBlock, self).__init__(**kwargs)
        self.pool = AvgPool2d(
            pool_size=strides,
            strides=strides,
            ceil_mode=True,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.pool(x)
        x = self.conv(x, training=training)
        return x
class ResNeStAUnit(nn.Layer):
    """
    ResNeSt(A) unit with residual connection.

    The identity branch goes through a ResNeStADownBlock whenever the channel
    count or the spatial resolution changes; otherwise it is a plain skip
    connection. ReLU is applied after the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck=True,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStAUnit, self).__init__(**kwargs)
        # Identity branch needs projection/downsampling when shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        if bottleneck:
            self.body = ResNeStABottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        else:
            self.body = ResNeStABlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_block = ResNeStADownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="identity_block")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_block(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class ResNeStA(tf.keras.Model):
    """
    ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of each stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ResNeStAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # GlobalAvgPool2D already flattens to (batch, channels), so no
        # explicit flatten is needed before the classifier.
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="output1/dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1/fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Pass `training` explicitly so the optional Dropout in the classifier
        # is correctly enabled/disabled, instead of relying on implicit Keras
        # training-phase propagation through the custom container.
        x = self.output1(x, training=training)
        return x
def get_resnesta(blocks,
                 bottleneck=None,
                 width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create ResNeSt(A) with average downsampling model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Per-stage unit counts for every supported depth. Depths 14 and 26 exist
    # in both basic-block and bottleneck flavors.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks))

    # Sanity check: a bottleneck unit counts as 3 conv layers, a basic one as 2,
    # plus 2 layers outside the stages.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if blocks >= 101:
        init_block_channels *= 2
    if bottleneck:
        channels_per_layers = [4 * ci for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage.
        last_stage = len(channels) - 1
        channels = [[cij if (i == last_stage) and (j == len(ci) - 1) else int(cij * width_scale)
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNeStA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def resnestabc14(**kwargs):
    """
    ResNeSt(A)-BC-14 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955. The 'BC' suffix denotes bottleneck units.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnestabc14", blocks=14, bottleneck=True, **kwargs)
def resnesta18(**kwargs):
    """
    ResNeSt(A)-18 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955. Uses basic (non-bottleneck) units.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta18", blocks=18, **kwargs)
def resnestabc26(**kwargs):
    """
    ResNeSt(A)-BC-26 with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955. The 'BC' suffix denotes bottleneck units.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnestabc26", blocks=26, bottleneck=True, **kwargs)
def resnesta50(**kwargs):
    """
    ResNeSt(A)-50 with average downsampling model with stride at the second convolution in
    bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta50", blocks=50, **kwargs)
def resnesta101(**kwargs):
    """
    ResNeSt(A)-101 with average downsampling model with stride at the second convolution in
    bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta101", blocks=101, **kwargs)
def resnesta152(**kwargs):
    """
    ResNeSt(A)-152 with average downsampling model with stride at the second convolution in
    bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta152", blocks=152, **kwargs)
def resnesta200(in_size=(256, 256), **kwargs):
    """
    ResNeSt(A)-200 with average downsampling model with stride at the second convolution in
    bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Uses a larger 256x256 input resolution and classifier dropout of 0.2.

    Parameters:
    ----------
    in_size : tuple of two ints, default (256, 256)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta200", blocks=200, in_size=in_size, dropout_rate=0.2, **kwargs)
def resnesta269(in_size=(320, 320), **kwargs):
    """
    ResNeSt(A)-269 with average downsampling model with stride at the second convolution in
    bottleneck block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Uses a larger 320x320 input resolution and classifier dropout of 0.2.

    Parameters:
    ----------
    in_size : tuple of two ints, default (320, 320)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnesta(model_name="resnesta269", blocks=269, in_size=in_size, dropout_rate=0.2, **kwargs)
def _test():
    """Smoke-test every model constructor: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # (constructor, input size, expected trainable-parameter count)
    cases = [
        (resnestabc14, 224, 10611688),
        (resnesta18, 224, 12763784),
        (resnestabc26, 224, 17069448),
        (resnesta50, 224, 27483240),
        (resnesta101, 224, 48275016),
        (resnesta152, 224, 65316040),
        (resnesta200, 256, 70201544),
        (resnesta269, 320, 110929480),
    ]

    for model, size, expected_count in cases:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, size, size))
        else:
            x = tf.random.normal((batch, size, size, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
| 19,800 | 32.561017 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/senet.py | """
SENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SEBlock, MaxPool2d, SimpleSequential, flatten
class SENetBottleneck(nn.Layer):
    """
    SENet bottleneck block for residual path in SENet unit.

    Structure: conv1x1 -> grouped conv3x3 -> conv1x1. Following the SENet-154
    design, the first conv outputs half of the grouped conv's width.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 data_format="channels_last",
                 **kwargs):
        super(SENetBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # ResNeXt-style group width: D channels per group.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        # SENet narrows the first 1x1 conv to half the grouped-conv width.
        group_width2 = group_width // 2
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width2,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=group_width2,
            out_channels=group_width,
            strides=strides,
            groups=cardinality,
            data_format=data_format,
            name="conv2")
        # No activation: the enclosing unit applies ReLU after the residual add.
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class SENetUnit(nn.Layer):
    """
    SENet unit: bottleneck body, squeeze-and-excitation recalibration, and a
    residual connection with ReLU after the addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    identity_conv3x3 : bool, default False
        Whether to use 3x3 convolution in the identity link.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 identity_conv3x3,
                 data_format="channels_last",
                 **kwargs):
        super(SENetUnit, self).__init__(**kwargs)
        # Identity branch needs projection when shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = SENetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            data_format=data_format,
            name="body")
        # Channel-wise recalibration applied to the body output before the add.
        self.se = SEBlock(
            channels=out_channels,
            data_format=data_format,
            name="se")
        if self.resize_identity:
            if identity_conv3x3:
                self.identity_conv = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    activation=None,
                    data_format=data_format,
                    name="identity_conv")
            else:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    activation=None,
                    data_format=data_format,
                    name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEInitBlock(nn.Layer):
    """
    SENet specific initial block: three stacked 3x3 convolutions (the first
    strided) followed by 3x3 max pooling, giving an overall 4x spatial
    reduction. Replaces the single 7x7 stem of classic ResNets.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(SEInitBlock, self).__init__(**kwargs)
        # First two convolutions run at half the output width.
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv3")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.pool(x)
        return x
class SENet(tf.keras.Model):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SENet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            # The first stage uses a 1x1 projection in the identity link;
            # later stages use a 3x3 (original SENet design).
            identity_conv3x3 = (i != 0)
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of each stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SENetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    identity_conv3x3=identity_conv3x3,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # NOTE(review): fixed 7x7 pooling assumes a 224x224 input — verify
        # before using other input sizes.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=0.2,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        # Pass `training` explicitly so the classifier Dropout is correctly
        # enabled/disabled, instead of relying on implicit Keras training-phase
        # propagation through the custom container.
        x = self.output1(x, training=training)
        return x
def get_senet(blocks,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SENet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # depth -> (per-stage unit counts, group cardinality)
    depth_table = {
        16: ([1, 1, 1, 1], 32),
        28: ([2, 2, 2, 2], 32),
        40: ([3, 3, 3, 3], 32),
        52: ([3, 4, 6, 3], 32),
        103: ([3, 4, 23, 3], 32),
        154: ([3, 8, 36, 3], 64),
    }
    try:
        layers, cardinality = depth_table[blocks]
    except KeyError:
        raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks))

    bottleneck_width = 4
    init_block_channels = 128
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SENet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def senet16(**kwargs):
    """
    SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet16", blocks=16, **kwargs)
def senet28(**kwargs):
    """
    SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet28", blocks=28, **kwargs)
def senet40(**kwargs):
    """
    SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet40", blocks=40, **kwargs)
def senet52(**kwargs):
    """
    SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet52", blocks=52, **kwargs)
def senet103(**kwargs):
    """
    SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet103", blocks=103, **kwargs)
def senet154(**kwargs):
    """
    SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This is the flagship configuration from the original paper.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_senet(model_name="senet154", blocks=154, **kwargs)
def _test():
    """Smoke-test every SENet constructor: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # constructor -> expected trainable-parameter count
    expected_counts = {
        senet16: 31366168,
        senet28: 36453768,
        senet40: 41541368,
        senet52: 44659416,
        senet103: 60963096,
        senet154: 115088984,
    }

    for model, expected_count in expected_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
| 15,060 | 30.574423 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/simplepose_coco.py | """
SimplePose for COCO Keypoint, implemented in TensorFlow.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco',
'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco',
'simplepose_resneta152b_coco']
import os
import tensorflow as tf
from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock, SimpleSequential, is_channels_first
from .resnet import resnet18, resnet50b, resnet101b, resnet152b
from .resneta import resneta50b, resneta101b, resneta152b
class SimplePose(tf.keras.Model):
    """
    SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,'
    https://arxiv.org/abs/1804.06208.

    A CNN backbone followed by a decoder of transposed-convolution blocks (each
    upsampling 2x) and a final 1x1 convolution producing one heatmap per keypoint.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 data_format="channels_last",
                 **kwargs):
        super(SimplePose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.data_format = data_format

        self.backbone = backbone
        self.backbone._name = "backbone"
        self.decoder = SimpleSequential(name="decoder")
        in_channels = backbone_out_channels
        for i, out_channels in enumerate(channels):
            # Each DeconvBlock doubles the spatial resolution (4x4 kernel,
            # stride 2, padding 1).
            self.decoder.add(DeconvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=4,
                strides=2,
                padding=1,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        self.decoder.add(conv1x1(
            in_channels=in_channels,
            out_channels=keypoints,
            use_bias=True,
            data_format=data_format,
            name="final_block"))
        # Converts heatmaps to keypoint coordinates/scores at inference time.
        self.heatmap_max_det = HeatmapMaxDetBlock(
            data_format=data_format,
            name="heatmap_max_det")

    def call(self, x, training=None):
        x = self.backbone(x, training=training)
        heatmap = self.decoder(x, training=training)
        # In graph mode the max-detection post-processing is skipped; only the
        # raw heatmaps can be returned there.
        if self.return_heatmap or not tf.executing_eagerly():
            return heatmap
        else:
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_simplepose(backbone,
                   backbone_out_channels,
                   keypoints,
                   model_name=None,
                   data_format="channels_last",
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create SimplePose model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Three 2x-upsampling decoder units, 256 channels each.
    decoder_channels = [256, 256, 256]
    net = SimplePose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        keypoints=keypoints,
        data_format=data_format,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet-18 encoder for COCO Keypoint, from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resnet18(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=512,
        keypoints=keypoints,
        model_name="simplepose_resnet18_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet-50b encoder for COCO Keypoint, from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resnet50b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resnet50b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet-101b encoder for COCO Keypoint, from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resnet101b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resnet101b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet-152b encoder for COCO Keypoint, from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resnet152b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resnet152b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet(A)-50b encoder for COCO Keypoint, from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resneta50b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resneta50b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet(A)-101b encoder for COCO Keypoint, from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resneta101b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resneta101b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose pose estimator with a ResNet(A)-152b encoder for COCO Keypoint, from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the classifier's feature stack, dropping its final (pooling) stage.
    encoder = resneta152b(pretrained=pretrained_backbone, data_format=data_format).features
    del encoder.children[-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_resneta152b_coco",
        data_format=data_format,
        **kwargs)
def _test():
    """
    Smoke test: build every SimplePose variant, run one forward pass and check output shape and parameter count.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False

    # Expected trainable-parameter counts, pinned per model.
    expected_params = {
        simplepose_resnet18_coco: 15376721,
        simplepose_resnet50b_coco: 33999697,
        simplepose_resnet101b_coco: 52991825,
        simplepose_resnet152b_coco: 68635473,
        simplepose_resneta50b_coco: 34018929,
        simplepose_resneta101b_coco: 53011057,
        simplepose_resneta152b_coco: 68654705,
    }

    for model, expected in expected_params.items():
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)

        batch_size = 14
        shape = (batch_size, 3, in_size[0], in_size[1]) if is_channels_first(data_format)\
            else (batch_size, in_size[0], in_size[1], 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (y.shape[0] == batch_size)
        if return_heatmap:
            # Heatmap output: `keypoints` channels at quarter input resolution.
            if is_channels_first(data_format):
                assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and
                        (y.shape[3] == x.shape[3] // 4))
            else:
                assert ((y.shape[3] == keypoints) and (y.shape[1] == x.shape[1] // 4) and
                        (y.shape[2] == x.shape[2] // 4))
        else:
            # Keypoint output: one (x, y, score) triple per keypoint.
            assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
# Run the smoke tests when the module is executed as a script.
if __name__ == "__main__":
    _test()
| 15,180 | 40.252717 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/vovnet.py | """
VoVNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
"""
__all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SequentialConcurrent, MaxPool2d, SimpleSequential, flatten,\
is_channels_first
class VoVUnit(nn.Layer):
    """
    VoVNet unit (one-shot-aggregation block).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    branch_channels : int
        Number of output channels for each branch.
    num_branches : int
        Number of branches.
    resize : bool
        Whether to downsample the input with a stride-2 max-pool before the branches.
    use_residual : bool
        Whether to add an identity shortcut around the unit.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 branch_channels,
                 num_branches,
                 resize,
                 use_residual,
                 data_format="channels_last",
                 **kwargs):
        super(VoVUnit, self).__init__(**kwargs)
        self.resize = resize
        self.use_residual = use_residual
        if self.resize:
            self.pool = MaxPool2d(
                pool_size=3,
                strides=2,
                ceil_mode=True,
                data_format=data_format,
                name="pool")
        # Chain of 3x3 convolutions whose input and every intermediate output get concatenated
        # (the 1x1 fusion below expects in_channels + num_branches * branch_channels channels).
        self.branches = SequentialConcurrent(
            data_format=data_format,
            name="branches")
        branch_in_channels = in_channels
        for i in range(num_branches):
            self.branches.add(conv3x3_block(
                in_channels=branch_in_channels,
                out_channels=branch_channels,
                data_format=data_format,
                name="branch{}".format(i + 1)))
            branch_in_channels = branch_channels
        # Single 1x1 fusion of all concatenated feature maps down to `out_channels`.
        self.concat_conv = conv1x1_block(
            in_channels=(in_channels + num_branches * branch_channels),
            out_channels=out_channels,
            data_format=data_format,
            name="concat_conv")
    def call(self, x, training=None):
        """
        Forward pass: optional downsampling, branch aggregation, 1x1 fusion and optional residual add.
        """
        if self.resize:
            x = self.pool(x)
        if self.use_residual:
            identity = x
        x = self.branches(x, training=training)
        x = self.concat_conv(x, training=training)
        if self.use_residual:
            x = x + identity
        return x
class VoVInitBlock(nn.Layer):
    """
    VoVNet specific initial block: three 3x3 convolutions, the first and last with stride 2
    (overall 4x spatial downsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(VoVInitBlock, self).__init__(**kwargs)
        # Intermediate stem width is half the output width.
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        """
        Forward pass through the three stem convolutions.
        """
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class VoVNet(tf.keras.Model):
    """
    VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
    https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    branch_channels : list of list of int
        Number of branch output channels for each unit.
    num_branches : int
        Number of branches for the each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 branch_channels,
                 num_branches,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(VoVNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        init_block_channels = 128
        self.features = SimpleSequential(name="features")
        self.features.add(VoVInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # All units except the first in each stage carry a residual shortcut;
                # the first unit of every stage after the first one downsamples.
                use_residual = (j != 0)
                resize = (j == 0) and (i != 0)
                stage.add(VoVUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    branch_channels=branch_channels[i][j],
                    num_branches=num_branches,
                    resize=resize,
                    use_residual=use_residual,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """
        Forward pass: feature extraction, flatten, and the final classification layer.
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_vovnet(blocks,
               slim=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create VoVNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    slim : bool, default False
        Whether to use a slim model.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported depth.
    layers_map = {
        27: [1, 1, 1, 1],
        39: [1, 1, 2, 2],
        57: [1, 1, 4, 3],
    }
    if blocks not in layers_map:
        raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks))
    layers = layers_map[blocks]
    assert (sum(layers) * 6 + 3 == blocks)

    num_branches = 5
    channels_per_layers = [256, 512, 768, 1024]
    branch_channels_per_layers = [128, 160, 192, 224]
    if slim:
        # The slim variant halves every channel count.
        channels_per_layers = [ci // 2 for ci in channels_per_layers]
        branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)]

    net = VoVNet(
        channels=channels,
        branch_channels=branch_channels,
        num_branches=num_branches,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # The network must be built with a concrete input shape before weights can be restored.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def vovnet27s(**kwargs):
    """
    VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object
    Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # 27-block configuration with halved channel widths.
    return get_vovnet(27, slim=True, model_name="vovnet27s", **kwargs)
def vovnet39(**kwargs):
    """
    VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object
    Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Full-width 39-block configuration.
    return get_vovnet(39, model_name="vovnet39", **kwargs)
def vovnet57(**kwargs):
    """
    VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object
    Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Full-width 57-block configuration.
    return get_vovnet(57, model_name="vovnet57", **kwargs)
def _test():
    """
    Smoke test: build every VoVNet variant, run one forward pass and check output shape and parameter count.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Expected trainable-parameter counts, pinned per model.
    expected_params = {
        vovnet27s: 3525736,
        vovnet39: 22600296,
        vovnet57: 36640296,
    }

    for model, expected in expected_params.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch_size = 14
        shape = (batch_size, 3, 224, 224) if is_channels_first(data_format) else (batch_size, 224, 224, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch_size, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
# Run the smoke tests when the module is executed as a script.
if __name__ == "__main__":
    _test()
| 11,511 | 31.519774 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/espnetv2.py | """
ESPNetv2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
https://arxiv.org/abs/1811.11431.
NB: not ready.
"""
__all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import BatchNorm, PReLU2, conv3x3, conv1x1_block, conv3x3_block, AvgPool2d, SimpleSequential,\
DualPathSequential, flatten, is_channels_first, get_channel_axis
class PreActivation(nn.Layer):
    """
    PreResNet like pure pre-activation block without convolution layer: BatchNorm followed by PReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        # PReLU needs the channel count up front; guard against a missing argument.
        assert (in_channels is not None)
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        self.activ = PReLU2(in_channels=in_channels, name="activ")
    def call(self, x, training=None):
        """
        Apply batch normalization followed by the PReLU activation.
        """
        x = self.bn(x, training=training)
        x = self.activ(x)
        return x
class ShortcutBlock(nn.Layer):
    """
    ESPNetv2 shortcut block: a 3x3 conv (with PReLU) followed by a 1x1 projection without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ShortcutBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            activation=(lambda: PReLU2(in_channels=in_channels, name="activ")),
            data_format=data_format,
            name="conv1")
        # No activation here: the caller adds this output into a residual sum first.
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """
        Forward pass through the two convolutions.
        """
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class HierarchicalConcurrent(SimpleSequential):
    """
    A container for hierarchical concatenation of blocks with parameters.

    Every child block receives the same input; each block's output is summed with the previous
    branch's output before all branch outputs are concatenated along the channel axis.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(HierarchicalConcurrent, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
    def call(self, x, training=None):
        """
        Apply every child block to `x`, accumulate each output with the previous one, and concatenate.
        """
        out = []
        y_prev = None
        for block in self.children:
            y = block(x, training=training)
            # (A leftover debug `print(y.shape)` was removed here; it spammed stdout on every forward pass.)
            if y_prev is not None:
                y = y + y_prev
            out.append(y)
            y_prev = y
        out = tf.concat(out, axis=self.axis)
        return out
class ESPBlock(nn.Layer):
    """
    ESPNetv2 block (so-called EESP block).

    The block carries a second pass-through tensor `x0` (the raw image) unchanged, so that it can be
    chained inside a DualPathSequential together with DownsampleBlock, which does consume `x0`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    dilations : list of int
        Dilation values for branches.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilations,
                 data_format="channels_last",
                 **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        num_branches = len(dilations)
        # Each branch produces an equal share of the output channels.
        assert (out_channels % num_branches == 0)
        self.downsample = (strides != 1)
        mid_channels = out_channels // num_branches
        # dilations = [1] * len(dilations)  # debug knob: disable dilation
        # Grouped 1x1 reduction into the per-branch width.
        self.reduce_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=num_branches,
            activation=(lambda: PReLU2(in_channels=mid_channels, name="activ")),
            data_format=data_format,
            name="reduce_conv")
        # Depthwise 3x3 branches with increasing dilation, hierarchically summed then concatenated.
        self.branches = HierarchicalConcurrent(
            data_format=data_format,
            name="branches")
        for i in range(num_branches):
            self.branches.add(conv3x3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                padding=dilations[i],
                dilation=dilations[i],
                groups=mid_channels,
                data_format=data_format,
                name="branch{}".format(i + 1)))
        self.merge_conv = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            groups=num_branches,
            activation=None,
            data_format=data_format,
            name="merge_conv")
        self.preactiv = PreActivation(
            in_channels=out_channels,
            data_format=data_format,
            name="preactiv")
        if not self.downsample:
            self.activ = PReLU2(in_channels=out_channels, name="activ")
    def call(self, x, x0, training=None):
        """
        Forward pass: reduce, dilated branches, pre-activation, grouped merge, and a residual add plus
        activation when the block does not downsample. `x0` is returned untouched.
        """
        y = self.reduce_conv(x, training=training)
        y = self.branches(y, training=training)
        y = self.preactiv(y, training=training)
        y = self.merge_conv(y, training=training)
        if not self.downsample:
            y = y + x
            y = self.activ(y)
        return y, x0
class DownsampleBlock(nn.Layer):
    """
    ESPNetv2 downsample block: a pooled copy of the input concatenated with a strided EESP branch,
    refined by a shortcut computed from the (pooled) original image `x0`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    x0_channels : int
        Number of input channels for shortcut.
    dilations : list of int
        Dilation values for branches in EESP block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 x0_channels,
                 dilations,
                 data_format="channels_last",
                 **kwargs):
        super(DownsampleBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # The EESP branch only produces the channel increment; the pooled input supplies the rest.
        inc_channels = out_channels - in_channels
        # dilations = [1] * len(dilations)  # debug knob: disable dilation
        self.pool = AvgPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            ceil_mode=True,
            data_format=data_format,
            name="pool")
        self.eesp = ESPBlock(
            in_channels=in_channels,
            out_channels=inc_channels,
            strides=2,
            dilations=dilations,
            data_format=data_format,
            name="eesp")
        self.shortcut_block = ShortcutBlock(
            in_channels=x0_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="shortcut_block")
        self.activ = PReLU2(in_channels=out_channels, name="activ")
    def call(self, x, x0, training=None):
        """
        Forward pass. Both the feature tensor `x` and the pass-through image `x0` are downsampled;
        the updated `x0` is returned alongside the output so later stages stay aligned.
        """
        y1 = self.pool(x)
        y2, _ = self.eesp(x, None, training=training)
        x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        x0 = self.pool(x0)
        y3 = self.shortcut_block(x0, training=training)
        x = x + y3
        x = self.activ(x)
        return x, x0
class ESPInitBlock(nn.Layer):
    """
    ESPNetv2 initial block: a stride-2 3x3 convolution for the features, plus a matching average-pool
    applied to the pass-through image `x0` to keep both paths at the same resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ESPInitBlock, self).__init__(**kwargs)
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            activation=(lambda: PReLU2(in_channels=out_channels, name="activ")),
            data_format=data_format,
            name="conv")
        self.pool = AvgPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
    def call(self, x, x0, training=None):
        """
        Forward pass: convolve the features and pool the pass-through image.
        """
        x = self.conv(x, training=training)
        x0 = self.pool(x0)
        return x, x0
class ESPFinalBlock(nn.Layer):
    """
    ESPNetv2 final block: a depthwise 3x3 convolution (groups == in_channels) followed by a grouped
    1x1 expansion.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    final_groups : int
        Number of groups in the last convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 final_groups,
                 data_format="channels_last",
                 **kwargs):
        super(ESPFinalBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            groups=in_channels,
            activation=(lambda: PReLU2(in_channels=in_channels, name="activ")),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=final_groups,
            activation=(lambda: PReLU2(in_channels=out_channels, name="activ")),
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """
        Forward pass through the two convolutions.
        """
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class ESPNetv2(tf.keras.Model):
    """
    ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
    https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    final_block_groups : int
        Number of groups for the final unit.
    dilations : list of list of list of int
        Dilation values for branches in each unit.
    dropout_rate : float, default 0.2
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 final_block_groups,
                 dilations,
                 dropout_rate=0.2,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ESPNetv2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Bug fix: `call` (and the pretrained-weight loader in `get_espnetv2`) read `self.data_format`,
        # but it was never stored on the instance, raising AttributeError at inference time.
        self.data_format = data_format
        x0_channels = in_channels
        # Dual-path container: the raw image is threaded alongside the features for the shortcut blocks.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=0,
            last_ordinals=2,
            name="features")
        self.features.add(ESPInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = DualPathSequential(name="stage{}_".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of each stage downsamples; the rest are plain EESP units.
                if j == 0:
                    unit = DownsampleBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        x0_channels=x0_channels,
                        dilations=dilations[i][j],
                        data_format=data_format,
                        name="unit{}".format(j + 1))
                else:
                    unit = ESPBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=1,
                        dilations=dilations[i][j],
                        data_format=data_format,
                        name="unit{}".format(j + 1))
                stage.add(unit)
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(ESPFinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels,
            final_groups=final_block_groups,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))
    def call(self, x, training=None):
        """
        Forward pass: feature extraction (the input also serves as the pass-through image), flatten,
        dropout and the final classification layer.
        """
        x = self.features(x, x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_espnetv2(width_scale,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Build an ESPNetv2 network for a given width multiplier.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert (width_scale <= 2.0)

    branches = 4
    layers = [1, 4, 8, 4]

    # Maximum dilation for every unit: the first unit of each stage uses a larger value than the rest.
    max_dilation_list = [6, 5, 4, 3, 2]
    max_dilations = []
    for i, li in enumerate(layers):
        max_dilations.append([max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1))
    dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations]

    # Channel widths, rounded so every stage width divides evenly among the branches.
    base_channels = 32
    weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches
    channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    init_block_channels = min(weighed_base_channels, base_channels)
    final_block_channels = 1024 if width_scale <= 1.5 else 1280

    net = ESPNetv2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        final_block_groups=branches,
        dilations=dilations,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # The network must be built with a concrete input shape before weights can be restored.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def espnetv2_wd2(**kwargs):
    """
    ESPNetv2 x0.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Half-width configuration.
    return get_espnetv2(0.5, model_name="espnetv2_wd2", **kwargs)
def espnetv2_w1(**kwargs):
    """
    ESPNetv2 x1.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Base-width configuration.
    return get_espnetv2(1.0, model_name="espnetv2_w1", **kwargs)
def espnetv2_w5d4(**kwargs):
    """
    ESPNetv2 x1.25 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # 1.25x-width configuration.
    return get_espnetv2(1.25, model_name="espnetv2_w5d4", **kwargs)
def espnetv2_w3d2(**kwargs):
"""
ESPNetv2 x1.5 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
Network,' https://arxiv.org/abs/1811.11431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs)
def espnetv2_w2(**kwargs):
"""
ESPNetv2 x2.0 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
Network,' https://arxiv.org/abs/1811.11431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs)
def _test():
    """Smoke-test every ESPNetv2 variant: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # (constructor, expected number of trainable parameters)
    model_specs = [
        (espnetv2_wd2, 1241092),
        (espnetv2_w1, 1669592),
        (espnetv2_w5d4, 1964832),
        (espnetv2_w3d2, 2314120),
        (espnetv2_w2, 3497144),
    ]

    for model, expected_count in model_specs:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 20,454 | 32.260163 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/shufflenet.py | """
ShuffleNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle, BatchNorm, MaxPool2d, AvgPool2d,\
SimpleSequential, get_channel_axis, flatten
class ShuffleUnit(nn.Layer):
    """
    ShuffleNet unit: grouped 1x1 compress -> channel shuffle -> 3x3 depthwise -> grouped 1x1 expand,
    with an additive (stride 1) or concatenating (stride 2) shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.downsample = downsample
        mid_channels = out_channels // 4
        if downsample:
            # The shortcut is concatenated, so the residual branch only has
            # to produce the remaining channels.
            out_channels -= in_channels
        compress_groups = 1 if ignore_group else groups

        self.compress_conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=compress_groups,
            data_format=data_format,
            name="compress_conv1")
        self.compress_bn1 = BatchNorm(
            data_format=data_format,
            name="compress_bn1")
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="c_shuffle")
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            strides=(2 if self.downsample else 1),
            data_format=data_format,
            name="dw_conv2")
        self.dw_bn2 = BatchNorm(
            data_format=data_format,
            name="dw_bn2")
        self.expand_conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups,
            data_format=data_format,
            name="expand_conv3")
        self.expand_bn3 = BatchNorm(
            data_format=data_format,
            name="expand_bn3")
        if downsample:
            self.avgpool = AvgPool2d(
                pool_size=3,
                strides=2,
                padding=1,
                data_format=data_format,
                name="avgpool")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = x
        y = self.activ(self.compress_bn1(self.compress_conv1(x), training=training))
        y = self.c_shuffle(y)
        y = self.dw_bn2(self.dw_conv2(y), training=training)
        y = self.expand_bn3(self.expand_conv3(y), training=training)
        if self.downsample:
            # Stride-2 unit: shortcut is average-pooled and concatenated.
            y = tf.concat([y, self.avgpool(identity)], axis=get_channel_axis(self.data_format))
        else:
            y = y + identity
        return self.activ(y)
class ShuffleInitBlock(nn.Layer):
    """
    ShuffleNet specific initial block: stride-2 3x3 conv + BN + ReLU, followed by stride-2 max pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        y = self.activ(self.bn(self.conv(x), training=training))
        return self.pool(y)
class ShuffleNet(tf.keras.Model):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of each stage downsamples; the very first unit
                # additionally ignores groups in its compress convolution.
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                stage.add(ShuffleUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    groups=groups,
                    downsample=downsample,
                    ignore_group=ignore_group,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Flatten BEFORE the classifier so Dense sees (batch, channels) in both
        # data formats. The previous order (Dense on the 4D pooled tensor, then
        # flatten) only happened to work for channels_last, where the channel
        # axis is last; for channels_first the Dense would act on a spatial
        # axis of size 1. For channels_last the result is value-identical.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.

    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers.
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    layers = [4, 8, 4]

    # Per-stage channel counts depend on the group count (Table 1 of the paper).
    channels_per_layers_map = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in channels_per_layers_map:
        raise ValueError("The {} of groups is not supported".format(groups))
    channels_per_layers = channels_per_layers_map[groups]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)

    net = ShuffleNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def shufflenet_g1_w1(**kwargs):
    """
    ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=1,
        width_scale=1.0,
        model_name="shufflenet_g1_w1",
        **kwargs)


def shufflenet_g2_w1(**kwargs):
    """
    ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=2,
        width_scale=1.0,
        model_name="shufflenet_g2_w1",
        **kwargs)


def shufflenet_g3_w1(**kwargs):
    """
    ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=3,
        width_scale=1.0,
        model_name="shufflenet_g3_w1",
        **kwargs)


def shufflenet_g4_w1(**kwargs):
    """
    ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=4,
        width_scale=1.0,
        model_name="shufflenet_g4_w1",
        **kwargs)


def shufflenet_g8_w1(**kwargs):
    """
    ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=8,
        width_scale=1.0,
        model_name="shufflenet_g8_w1",
        **kwargs)


def shufflenet_g1_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.75,
        model_name="shufflenet_g1_w3d4",
        **kwargs)


def shufflenet_g3_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.75,
        model_name="shufflenet_g3_w3d4",
        **kwargs)


def shufflenet_g1_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.5,
        model_name="shufflenet_g1_wd2",
        **kwargs)


def shufflenet_g3_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.5,
        model_name="shufflenet_g3_wd2",
        **kwargs)


def shufflenet_g1_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.25,
        model_name="shufflenet_g1_wd4",
        **kwargs)


def shufflenet_g3_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.25,
        model_name="shufflenet_g3_wd4",
        **kwargs)
def _test():
    """Smoke-test every ShuffleNet variant: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # (constructor, expected number of trainable parameters)
    model_specs = [
        (shufflenet_g1_w1, 1531936),
        (shufflenet_g2_w1, 1733848),
        (shufflenet_g3_w1, 1865728),
        (shufflenet_g4_w1, 1968344),
        (shufflenet_g8_w1, 2434768),
        (shufflenet_g1_w3d4, 975214),
        (shufflenet_g3_w3d4, 1238266),
        (shufflenet_g1_wd2, 534484),
        (shufflenet_g3_wd2, 718324),
        (shufflenet_g1_wd4, 209746),
        (shufflenet_g3_wd4, 305902),
    ]

    for model, expected_count in model_specs:
        net = model(pretrained=pretrained)

        batch = 14
        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 17,521 | 33.089494 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/bamresnet.py | """
BAM-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
"""
__all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, BatchNorm, SimpleSequential, flatten,\
is_channels_first
from .resnet import ResInitBlock, ResUnit
class DenseBlock(nn.Layer):
    """
    Standard dense block with Batch normalization and ReLU activation.

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        self.fc = nn.Dense(
            units=out_channels,
            input_dim=in_channels,
            name="fc")
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        return self.activ(self.bn(self.fc(x), training=training))
class ChannelGate(nn.Layer):
    """
    BAM channel gate block: global pooling, an MLP over channels, and broadcast
    of the result back to the input's spatial shape.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_layers : int, default 1
        Number of dense blocks.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 num_layers=1,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelGate, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = channels // reduction_ratio

        self.pool = nn.GlobalAvgPool2D(
            data_format=data_format,
            name="pool")
        self.flatten = nn.Flatten()
        self.init_fc = DenseBlock(
            in_channels=channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="init_fc")
        self.main_fcs = SimpleSequential(name="main_fcs")
        for i in range(num_layers - 1):
            self.main_fcs.children.append(DenseBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                data_format=data_format,
                name="fc{}".format(i + 1)))
        self.final_fc = nn.Dense(
            units=channels,
            input_dim=mid_channels,
            name="final_fc")

    def call(self, x, training=None):
        identity = x
        w = self.flatten(self.pool(x))
        w = self.init_fc(w)
        w = self.main_fcs(w, training=training)
        w = self.final_fc(w)
        # Re-insert the two collapsed spatial axes, then broadcast to the input shape.
        if is_channels_first(self.data_format):
            w = tf.expand_dims(tf.expand_dims(w, 2), 3)
        else:
            w = tf.expand_dims(tf.expand_dims(w, 1), 2)
        return tf.broadcast_to(w, shape=identity.shape)
class SpatialGate(nn.Layer):
    """
    BAM spatial gate block: 1x1 reduce, a stack of dilated 3x3 convolutions, and a
    1x1 projection to a single-channel map broadcast back over all channels.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_dil_convs : int, default 2
        Number of dilated convolutions.
    dilation : int, default 4
        Dilation/padding value for corresponding convolutions.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 num_dil_convs=2,
                 dilation=4,
                 data_format="channels_last",
                 **kwargs):
        super(SpatialGate, self).__init__(**kwargs)
        mid_channels = channels // reduction_ratio

        self.init_conv = conv1x1_block(
            in_channels=channels,
            out_channels=mid_channels,
            strides=1,
            use_bias=True,
            data_format=data_format,
            name="init_conv")
        self.dil_convs = SimpleSequential(name="dil_convs")
        for i in range(num_dil_convs):
            self.dil_convs.children.append(conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=1,
                padding=dilation,
                dilation=dilation,
                use_bias=True,
                data_format=data_format,
                name="conv{}".format(i + 1)))
        self.final_conv = conv1x1(
            in_channels=mid_channels,
            out_channels=1,
            strides=1,
            use_bias=True,
            data_format=data_format,
            name="final_conv")

    def call(self, x, training=None):
        identity = x
        w = self.init_conv(x, training=training)
        w = self.dil_convs(w, training=training)
        w = self.final_conv(w)
        # Single-channel map broadcast over the full input shape.
        return tf.broadcast_to(w, shape=identity.shape)
class BamBlock(nn.Layer):
    """
    BAM attention block for BAM-ResNet: scales the input by
    1 + sigmoid(channel_gate * spatial_gate).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(BamBlock, self).__init__(**kwargs)
        self.ch_att = ChannelGate(
            channels=channels,
            data_format=data_format,
            name="ch_att")
        self.sp_att = SpatialGate(
            channels=channels,
            data_format=data_format,
            name="sp_att")
        self.sigmoid = tf.nn.sigmoid

    def call(self, x, training=None):
        ch = self.ch_att(x, training=training)
        sp = self.sp_att(x, training=training)
        att = 1 + self.sigmoid(ch * sp)
        return x * att
class BamResUnit(nn.Layer):
    """
    BAM-ResNet unit: a plain ResNet unit, preceded by a BAM attention block on
    every downsampling (stride > 1) unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 data_format="channels_last",
                 **kwargs):
        super(BamResUnit, self).__init__(**kwargs)
        self.use_bam = (strides != 1)

        if self.use_bam:
            self.bam = BamBlock(
                channels=in_channels,
                data_format=data_format,
                name="bam")
        self.res_unit = ResUnit(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            bottleneck=bottleneck,
            conv1_stride=False,
            data_format=data_format,
            name="res_unit")

    def call(self, x, training=None):
        y = self.bam(x, training=training) if self.use_bam else x
        return self.res_unit(y, training=training)
class BamResNet(tf.keras.Model):
    """
    BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(BamResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 1 if (j != 0) or (i == 0) else 2
                stage.add(BamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create BAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (18, 34, 50, 101 or 152).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    NOTE: the previous docstring documented `conv1_stride`, `use_se` and
    `width_scale` parameters copied from another model factory; this function
    never accepted them.
    """
    # Standard ResNet depth -> per-stage unit counts.
    layers_map = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in layers_map:
        raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks))
    layers = layers_map[blocks]

    init_block_channels = 64

    # Depths below 50 use basic blocks; 50+ use bottleneck blocks (4x wider).
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = BamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def bam_resnet18(**kwargs):
    """
    BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=18,
        model_name="bam_resnet18",
        **kwargs)


def bam_resnet34(**kwargs):
    """
    BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=34,
        model_name="bam_resnet34",
        **kwargs)


def bam_resnet50(**kwargs):
    """
    BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=50,
        model_name="bam_resnet50",
        **kwargs)


def bam_resnet101(**kwargs):
    """
    BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=101,
        model_name="bam_resnet101",
        **kwargs)


def bam_resnet152(**kwargs):
    """
    BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=152,
        model_name="bam_resnet152",
        **kwargs)
def _test():
    """Smoke-test every BAM-ResNet variant: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # (constructor, expected number of trainable parameters)
    model_specs = [
        (bam_resnet18, 11712503),
        (bam_resnet34, 21820663),
        (bam_resnet50, 25915099),
        (bam_resnet101, 44907227),
        (bam_resnet152, 60550875),
    ]

    for model, expected_count in model_specs:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 15,973 | 30.757455 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/centernet.py | """
CenterNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850.
"""
__all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc',
'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco',
'CenterNetHeatmapMaxDet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, conv1x1, conv3x3_block, DeconvBlock, Concurrent, SimpleSequential, is_channels_first
from .resnet import resnet18, resnet50b, resnet101b
class CenterNetDecoderUnit(nn.Layer):
    """
    CenterNet decoder unit: a 3x3 conv followed by a stride-2 deconvolution (2x upsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(CenterNetDecoderUnit, self).__init__(**kwargs)
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.deconv = DeconvBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=4,
            strides=2,
            padding=1,
            data_format=data_format,
            name="deconv")

    def call(self, x, training=None):
        y = self.conv(x, training=training)
        return self.deconv(y, training=training)
class CenterNetHeadBlock(nn.Layer):
    """
    CenterNet simple head block: a 3x3 conv (no BN) followed by a 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(CenterNetHeadBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x))
class CenterNetHeatmapBlock(nn.Layer):
    """
    CenterNet heatmap block: head + sigmoid, then either max-pool NMS (inference)
    or value clipping (training).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    do_nms : bool
        Whether do NMS (or simply clip for training otherwise).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 do_nms,
                 data_format="channels_last",
                 **kwargs):
        super(CenterNetHeatmapBlock, self).__init__(**kwargs)
        self.do_nms = do_nms

        self.head = CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="head")
        self.sigmoid = tf.nn.sigmoid
        if self.do_nms:
            self.pool = MaxPool2d(
                pool_size=3,
                strides=1,
                padding=1,
                data_format=data_format,
                name="pool")

    def call(self, x, training=None):
        x = self.head(x)
        x = self.sigmoid(x)
        if self.do_nms:
            # Max-pool NMS: a 3x3 max-pool equals the input exactly at local
            # maxima, so the equality mask keeps only peaks. The original
            # compared `.numpy()` arrays, which only works in eager mode;
            # tf.equal gives the identical result and also works under
            # graph execution / tf.function.
            y = self.pool(x)
            x = x * tf.cast(tf.equal(y, x), x.dtype)
        else:
            # Clip away exact 0/1 so the focal loss logs stay finite.
            eps = 1e-4
            x = tf.clip_by_value(x, clip_value_min=eps, clip_value_max=(1.0 - eps))
        return x
class CenterNetHeatmapMaxDet(nn.Layer):
    """
    CenterNet decoder for heads (heatmap, wh, reg).
    Parameters:
    ----------
    topk : int, default 40
        Keep only `topk` detections.
    scale : int, default is 4
        Downsampling scale factor.
    max_batch : int, default is 256
        Maximal batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 topk=40,
                 scale=4,
                 max_batch=256,
                 data_format="channels_last",
                 **kwargs):
        super(CenterNetHeatmapMaxDet, self).__init__(**kwargs)
        self.topk = topk
        self.scale = scale
        self.max_batch = max_batch
        self.data_format = data_format
    def call(self, x, training=None):
        # NOTE(review): decoding is done on the host via numpy (`x.numpy()`),
        # so this layer only works in eager mode -- confirm callers never
        # trace it inside `tf.function`.
        import numpy as np
        x_ = x.numpy()
        if not is_channels_first(self.data_format):
            x_ = x_.transpose((0, 3, 1, 2))
        # Channel layout of the concatenated heads: [heatmap (C), wh (2), reg (2)].
        heatmap = x_[:, :-4]
        wh = x_[:, -4:-2]
        reg = x_[:, -2:]
        batch, _, out_h, out_w = heatmap.shape
        # Top-k scores over all classes and spatial positions at once.
        heatmap_flat = heatmap.reshape((batch, -1))
        indices = np.argsort(heatmap_flat)[:, -self.topk:]
        scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1)
        # Decompose flat indices into (class, y, x).
        topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32)
        topk_indices = indices % (out_h * out_w)
        topk_ys = (topk_indices // out_w).astype(dtype=np.float32)
        topk_xs = (topk_indices % out_w).astype(dtype=np.float32)
        # Refine integer cell coordinates with the regression offsets.
        center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1)
        ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1)
        h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        # Corner-format boxes, scaled back to input-image resolution.
        bboxes = tf.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1)
        bboxes = bboxes * self.scale
        topk_classes = tf.expand_dims(topk_classes, axis=-1)
        scores = tf.expand_dims(scores, axis=-1)
        # Per-detection layout: [x1, y1, x2, y2, class, score].
        result = tf.concat((bboxes, topk_classes, scores), axis=-1)
        return result
class CenterNet(tf.keras.Model):
    """
    CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    topk : int, default 40
        Keep only `topk` detections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (512, 512)
        Spatial size of the expected input image.
    classes : int, default 80
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 topk=40,
                 in_channels=3,
                 in_size=(512, 512),
                 classes=80,
                 data_format="channels_last",
                 **kwargs):
        super(CenterNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.in_channels = in_channels
        self.return_heatmap = return_heatmap
        self.data_format = data_format
        self.backbone = backbone
        self.backbone._name = "backbone"
        # Decoder: a stack of upsampling units followed by three parallel heads.
        self.decoder = SimpleSequential(name="decoder")
        in_channels = backbone_out_channels
        for i, out_channels in enumerate(channels):
            self.decoder.add(CenterNetDecoderUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        # Heads are concatenated along the channel axis:
        # class heatmap (classes ch) + box width/height (2 ch) + offset reg (2 ch).
        heads = Concurrent(
            data_format=data_format,
            name="heads")
        heads.add(CenterNetHeatmapBlock(
            in_channels=in_channels,
            out_channels=classes,
            do_nms=(not self.return_heatmap),
            data_format=data_format,
            name="heapmap_block"))
        heads.add(CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=2,
            data_format=data_format,
            name="wh_block"))
        heads.add(CenterNetHeadBlock(
            in_channels=in_channels,
            out_channels=2,
            data_format=data_format,
            name="reg_block"))
        self.decoder.add(heads)
        if not self.return_heatmap:
            self.heatmap_max_det = CenterNetHeatmapMaxDet(
                topk=topk,
                scale=4,
                data_format=data_format,
                name="heatmap_max_det")
    def call(self, x, training=None):
        x = self.backbone(x, training=training)
        x = self.decoder(x, training=training)
        # NOTE(review): when `return_heatmap` is True but execution is not
        # eager, this branch is still taken although `heatmap_max_det` was
        # never created in __init__ -- looks like a latent AttributeError;
        # confirm intended behavior.
        if not self.return_heatmap or not tf.executing_eagerly():
            x = self.heatmap_max_det(x)
        return x
def get_centernet(backbone,
                  backbone_out_channels,
                  classes,
                  model_name=None,
                  data_format="channels_last",
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create CenterNet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    classes : int
        Number of classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    tf.keras.Model
        A network.
    """
    net = CenterNet(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=[256, 128, 64],
        classes=classes,
        data_format=data_format,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # The model must be built (weights created) before weights can be loaded.
        num_in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, num_in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (num_in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def centernet_resnet18_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-18 for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 20
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-18 feature extractor without its final pooling stage.
    backbone = resnet18(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=512,
        classes=classes,
        model_name="centernet_resnet18_voc",
        data_format=data_format,
        **kwargs)
def centernet_resnet18_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-18 for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 80
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-18 feature extractor without its final pooling stage.
    backbone = resnet18(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=512,
        classes=classes,
        model_name="centernet_resnet18_coco",
        data_format=data_format,
        **kwargs)
def centernet_resnet50b_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-50b for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 20
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-50b feature extractor without its final pooling stage.
    backbone = resnet50b(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        classes=classes,
        model_name="centernet_resnet50b_voc",
        data_format=data_format,
        **kwargs)
def centernet_resnet50b_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-50b for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 80
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-50b feature extractor without its final pooling stage.
    backbone = resnet50b(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        classes=classes,
        model_name="centernet_resnet50b_coco",
        data_format=data_format,
        **kwargs)
def centernet_resnet101b_voc(pretrained_backbone=False, classes=20, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 20
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-101b feature extractor without its final pooling stage.
    backbone = resnet101b(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        classes=classes,
        model_name="centernet_resnet101b_voc",
        data_format=data_format,
        **kwargs)
def centernet_resnet101b_coco(pretrained_backbone=False, classes=80, data_format="channels_last", **kwargs):
    """
    CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 80
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Use the ResNet-101b feature extractor without its final pooling stage.
    backbone = resnet101b(pretrained=pretrained_backbone).features
    del backbone.children[-1]
    return get_centernet(
        backbone=backbone,
        backbone_out_channels=2048,
        classes=classes,
        model_name="centernet_resnet101b_coco",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test every CenterNet variant and check parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (512, 512)
    topk = 40
    return_heatmap = False
    pretrained = False

    model_list = [
        (centernet_resnet18_voc, 20),
        (centernet_resnet18_coco, 80),
        (centernet_resnet50b_voc, 20),
        (centernet_resnet50b_coco, 80),
        (centernet_resnet101b_voc, 20),
        (centernet_resnet101b_coco, 80),
    ]

    for net_fn, num_classes in model_list:
        net = net_fn(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap,
                     data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        y = net(x)
        assert (y.shape[0] == batch)
        if return_heatmap:
            # Heatmap output: classes + 4 aux channels at 1/4 input resolution.
            if is_channels_first(data_format):
                assert (y.shape[1] == num_classes + 4) and (y.shape[2] == x.shape[2] // 4) and (
                    y.shape[3] == x.shape[3] // 4)
            else:
                assert (y.shape[3] == num_classes + 4) and (y.shape[1] == x.shape[1] // 4) and (
                    y.shape[2] == x.shape[2] // 4)
        else:
            # Decoded detections: topk rows of [x1, y1, x2, y2, class, score].
            assert (y.shape[1] == topk) and (y.shape[2] == 6)

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(net_fn.__name__, weight_count))
        assert (net_fn != centernet_resnet18_voc or weight_count == 14215640)
        assert (net_fn != centernet_resnet18_coco or weight_count == 14219540)
        assert (net_fn != centernet_resnet50b_voc or weight_count == 30086104)
        assert (net_fn != centernet_resnet50b_coco or weight_count == 30090004)
        assert (net_fn != centernet_resnet101b_voc or weight_count == 49078232)
        assert (net_fn != centernet_resnet101b_coco or weight_count == 49082132)


if __name__ == "__main__":
    _test()
| 20,073 | 35.039497 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/proxylessnas_cub.py | """
ProxylessNAS for CUB-200-2011, implemented in TensorFlow.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub']
from .common import is_channels_first
from .proxylessnas import get_proxylessnas
def proxylessnas_cpu_cub(classes=200, **kwargs):
    """
    ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        classes=classes,
        version="cpu",
        model_name="proxylessnas_cpu_cub",
        **kwargs)
def proxylessnas_gpu_cub(classes=200, **kwargs):
    """
    ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        classes=classes,
        version="gpu",
        model_name="proxylessnas_gpu_cub",
        **kwargs)
def proxylessnas_mobile_cub(classes=200, **kwargs):
    """
    ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task
    and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        classes=classes,
        version="mobile",
        model_name="proxylessnas_mobile_cub",
        **kwargs)
def proxylessnas_mobile14_cub(classes=200, **kwargs):
    """
    ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task
    and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        classes=classes,
        version="mobile14",
        model_name="proxylessnas_mobile14_cub",
        **kwargs)
def _test():
    """Smoke-test every ProxylessNAS-CUB variant and check parameter counts."""
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    model_list = [
        proxylessnas_cpu_cub,
        proxylessnas_gpu_cub,
        proxylessnas_mobile_cub,
        proxylessnas_mobile14_cub,
    ]

    for net_fn in model_list:
        net = net_fn(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 200))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(net_fn.__name__, weight_count))
        assert (net_fn != proxylessnas_cpu_cub or weight_count == 3215248)
        assert (net_fn != proxylessnas_gpu_cub or weight_count == 5736648)
        assert (net_fn != proxylessnas_mobile_cub or weight_count == 3055712)
        assert (net_fn != proxylessnas_mobile14_cub or weight_count == 5423168)


if __name__ == "__main__":
    _test()
| 4,145 | 34.741379 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ibnresnet.py | """
IBN-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNResNet', 'ibn_resnet50', 'ibn_resnet101', 'ibn_resnet152']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, BatchNorm, conv1x1_block, conv3x3_block, IBN, SimpleSequential, flatten, is_channels_first
from .resnet import ResInitBlock
class IBNConvBlock(nn.Layer):
    """
    IBN-Net specific convolution block with BN/IBN normalization and ReLU activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    activate : bool, default True
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_ibn=False,
                 activate=True,
                 data_format="channels_last",
                 **kwargs):
        super(IBNConvBlock, self).__init__(**kwargs)
        self.activate = activate
        self.use_ibn = use_ibn
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="conv")
        # The normalization flavour (IBN vs plain BN) is fixed once here;
        # `call` dispatches on the same flag.
        if self.use_ibn:
            self.ibn = IBN(
                channels=out_channels,
                data_format=data_format,
                name="ibn")
        else:
            self.bn = BatchNorm(
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = nn.ReLU()
    def call(self, x, training=None):
        # conv -> (IBN | BN) -> optional ReLU
        x = self.conv(x, training=training)
        if self.use_ibn:
            x = self.ibn(x, training=training)
        else:
            x = self.bn(x, training=training)
        if self.activate:
            x = self.activ(x)
        return x
def ibn_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      groups=1,
                      use_bias=False,
                      use_ibn=False,
                      activate=True,
                      data_format="channels_last",
                      **kwargs):
    """
    1x1 version of the IBN-Net specific convolution block.

    A point-wise convolution needs no spatial padding, so `kernel_size` and
    `padding` are fixed to 1 and 0; all other options are forwarded unchanged.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    activate : bool, default True
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return IBNConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        groups=groups,
        use_bias=use_bias,
        use_ibn=use_ibn,
        activate=activate,
        data_format=data_format,
        **kwargs)
class IBNResBottleneck(nn.Layer):
    """
    IBN-ResNet bottleneck block for residual path in IBN-ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 conv1_ibn,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResBottleneck, self).__init__(**kwargs)
        # Standard ResNet bottleneck ratio: the 3x3 conv runs at 1/4 width.
        mid_channels = out_channels // 4
        self.conv1 = ibn_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_ibn=conv1_ibn,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
        # No activation on the last conv: ReLU is applied after the residual
        # addition in IBNResUnit.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class IBNResUnit(nn.Layer):
    """
    IBN-ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 conv1_ibn,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResUnit, self).__init__(**kwargs)
        # A projection shortcut is only needed when the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = IBNResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_ibn=conv1_ibn,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        # Post-activation residual: add first, then ReLU.
        x = x + identity
        x = self.activ(x)
        return x
class IBNResNet(tf.keras.Model):
    """
    IBN-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # Per the IBN-Net paper, IBN is not applied in the last
                # (2048-channel) stage.
                conv1_ibn = (out_channels < 2048)
                stage.add(IBNResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    conv1_ibn=conv1_ibn,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # NOTE(review): the fixed 7x7 final pool assumes a 224x224 input
        # (7 = 224 / 32) -- confirm for other input sizes.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_ibnresnet(blocks,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create IBN-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Units per stage for the supported depths.
    layers_per_depth = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in layers_per_depth:
        raise ValueError("Unsupported IBN-ResNet with number of blocks: {}".format(blocks))
    layers = layers_per_depth[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]

    net = IBNResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # The model must be built (weights created) before weights can be loaded.
        num_in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, num_in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (num_in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def ibn_resnet50(**kwargs):
    """
    IBN-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=50,
        model_name="ibn_resnet50",
        **kwargs)
def ibn_resnet101(**kwargs):
    """
    IBN-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=101,
        model_name="ibn_resnet101",
        **kwargs)
def ibn_resnet152(**kwargs):
    """
    IBN-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(
        blocks=152,
        model_name="ibn_resnet152",
        **kwargs)
def _test():
    """Smoke-test every IBN-ResNet variant and check parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    model_list = [
        ibn_resnet50,
        ibn_resnet101,
        ibn_resnet152,
    ]

    for net_fn in model_list:
        net = net_fn(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(net_fn.__name__, weight_count))
        assert (net_fn != ibn_resnet50 or weight_count == 25557032)
        assert (net_fn != ibn_resnet101 or weight_count == 44549160)
        assert (net_fn != ibn_resnet152 or weight_count == 60192808)


if __name__ == "__main__":
    _test()
| 14,465 | 31.290179 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/common.py | """
Common routines for models in TensorFlow 2.0.
"""
__all__ = ['is_channels_first', 'get_channel_axis', 'round_channels', 'get_im_size', 'interpolate_im', 'BreakBlock',
'ReLU6', 'HSwish', 'PReLU2', 'get_activation_layer', 'flatten', 'MaxPool2d', 'AvgPool2d', 'GlobalAvgPool2d',
'BatchNorm', 'InstanceNorm', 'IBN', 'Conv1d', 'Conv2d', 'SelectableDense', 'DenseBlock', 'ConvBlock1d',
'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block',
'conv7x7_block', 'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock',
'pre_conv1x1_block', 'pre_conv3x3_block', 'DeconvBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock',
'SABlock', 'SAConvBlock', 'saconv3x3_block', 'PixelShuffle', 'DucBlock', 'Identity', 'SimpleSequential',
'ParametricSequential', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricConcurrent',
'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'NormActivation',
'InterpolationBlock', 'Hourglass', 'HeatmapMaxDetBlock']
import math
from inspect import isfunction
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
def is_channels_first(data_format):
    """
    Check whether the tested data format is channels-first.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        True when `data_format` is exactly 'channels_first'.
    """
    return data_format == "channels_first"
def get_channel_axis(data_format):
    """
    Get channel axis.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    int
        Channel axis: 1 for channels-first, -1 (last) otherwise.
    """
    if data_format == "channels_first":
        return 1
    return -1
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns:
    -------
    int
        Weighted number of channels.
    """
    # Round to the nearest multiple of `divisor`, but never below `divisor`.
    result = int(channels + divisor / 2.0) // divisor * divisor
    if result < divisor:
        result = divisor
    # Never round down by more than 10%.
    if float(result) < 0.9 * channels:
        result += divisor
    return result
def get_im_size(x,
                data_format):
    """
    Get spatial size for a tensor.

    Parameters:
    ----------
    x : tensor
        A tensor.
    data_format : str
        The ordering of the dimensions in the tensor.

    Returns:
    -------
    (int, int)
        Size (height x width).
    """
    # NCHW keeps the spatial dims at positions 2:4, NHWC at positions 1:3.
    shape = x.get_shape().as_list()
    if data_format == "channels_first":
        return shape[2:4]
    return shape[1:3]
def interpolate_im(x,
                   scale_factor=1,
                   out_size=None,
                   data_format="channels_last"):
    """
    Bilinear change spatial size for a tensor.

    Parameters:
    ----------
    x : tensor
        A tensor.
    scale_factor : int, default 1
        Multiplier for spatial size.
    out_size : tuple of 2 int, default None
        Spatial size of the output tensor for the bilinear upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    tensor
        Resulted tensor.
    """
    channels_first = is_channels_first(data_format)
    if out_size is None:
        in_size = get_im_size(x, data_format=data_format)
        if scale_factor != 0:
            out_size = tuple(dim * scale_factor for dim in in_size)
        else:
            out_size = in_size
    # tf.image.resize works on NHWC, so temporarily transpose NCHW inputs.
    if channels_first:
        x = tf.transpose(x, perm=[0, 2, 3, 1])
    x = tf.image.resize(
        images=x,
        size=out_size)
    if channels_first:
        x = tf.transpose(x, perm=[0, 3, 1, 2])
    return x
class BreakBlock(nn.Layer):
    """
    Break connection block for hourglass.
    """
    def __init__(self, **kwargs):
        super(BreakBlock, self).__init__(**kwargs)
    def call(self, x):
        # Intentionally discards its input: used to cut a connection.
        return None
    def __repr__(self):
        return '{name}()'.format(name=self.__class__.__name__)
class ReLU6(nn.Layer):
    """
    ReLU6 activation layer: min(max(x, 0), 6).
    """
    def __init__(self, **kwargs):
        super(ReLU6, self).__init__(**kwargs)
    def call(self, x):
        return tf.nn.relu6(x)
class Swish(nn.Layer):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
    """
    def call(self, x):
        # swish(x) = x * sigmoid(x)
        return x * tf.nn.sigmoid(x)
class HSigmoid(nn.Layer):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244. Computes relu6(x + 3) / 6.
    """
    def __init__(self, **kwargs):
        super(HSigmoid, self).__init__(**kwargs)

    def call(self, x):
        shifted = x + 3.0
        return tf.nn.relu6(shifted) / 6.0
class HSwish(nn.Layer):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Computes x * relu6(x + 3) / 6.
    """
    def __init__(self, **kwargs):
        super(HSwish, self).__init__(**kwargs)

    def call(self, x):
        gate = tf.nn.relu6(x + 3.0)
        return x * gate / 6.0
class PReLU2(nn.PReLU):
    """
    Parametric leaky version of a Rectified Linear Unit (with wide alpha).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    alpha_initializer : tf.Initializer, default tf.constant_initializer(0.25)
        Initializer function for the weights.
    shared_axes : list of int, default None
        The axes along which to share learnable parameters for the activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels=1,
                 alpha_initializer=tf.constant_initializer(0.25),
                 data_format="channels_last",
                 **kwargs):
        self.in_channels = in_channels
        self.data_format = data_format
        super(PReLU2, self).__init__(
            alpha_initializer=alpha_initializer,
            **kwargs)

    def build(self, input_shape):
        # One trainable alpha per channel (shared across all other axes),
        # overriding the parent class's per-element alpha.
        self.alpha = self.add_weight(
            shape=(self.in_channels,),
            name="alpha",
            initializer=self.alpha_initializer,
            regularizer=self.alpha_regularizer,
            constraint=self.alpha_constraint)
        channel_axis = (1 if is_channels_first(self.data_format) else len(input_shape) - 1)
        assert (self.in_channels == input_shape[channel_axis])
        # Pin every non-channel dimension in the input spec.
        axes = {}
        for i in range(1, len(input_shape)):
            if i != channel_axis:
                axes[i] = input_shape[i]
        self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True

    def call(self, x):
        # Work in channels-last layout so per-channel alpha broadcasts over the last axis.
        if is_channels_first(self.data_format) and (len(x.shape.as_list()) == 4):
            x = tf.transpose(x, perm=[0, 2, 3, 1])
        # NOTE(review): `K` (the Keras backend alias) must be imported at the top of
        # this file -- verify; it is not visible in this chunk.
        pos = K.relu(x)
        neg = -self.alpha * K.relu(-x)
        x = pos + neg
        if is_channels_first(self.data_format) and (len(x.shape.as_list()) == 4):
            x = tf.transpose(x, perm=[0, 3, 1, 2])
        return x
class Tanh(nn.Layer):
    """
    Hyperbolic tangent activation function.
    """
    def __init__(self, **kwargs):
        super(Tanh, self).__init__(**kwargs)

    def call(self, x):
        return tf.math.tanh(x)
def get_activation_layer(activation,
                         **kwargs):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    activation : function, or str, or nn.Layer
        Activation function or name of activation function.

    Returns:
    -------
    nn.Layer
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    if isinstance(activation, str):
        # "sigmoid" maps to a plain function, not a layer, so handle it first.
        if activation == "sigmoid":
            return tf.nn.sigmoid
        layer_classes = {
            "relu": nn.ReLU,
            "relu6": ReLU6,
            "prelu2": PReLU2,
            "swish": Swish,
            "hswish": HSwish,
            "hsigmoid": HSigmoid,
            "tanh": Tanh,
        }
        if activation not in layer_classes:
            raise NotImplementedError()
        return layer_classes[activation](**kwargs)
    assert (isinstance(activation, nn.Layer))
    return activation
def flatten(x,
            data_format):
    """
    Flattens the input to two dimensional.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Move channels first so flattening order matches the channels-first layout.
    if not is_channels_first(data_format):
        x = tf.transpose(x, perm=(0, 3, 1, 2))
    flat_dim = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, shape=(-1, flat_dim))
class MaxPool2d(nn.Layer):
    """
    Max pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 pool_size,
                 strides,
                 padding=0,
                 ceil_mode=False,
                 data_format="channels_last",
                 **kwargs):
        super(MaxPool2d, self).__init__(**kwargs)
        pool_size = (pool_size, pool_size) if isinstance(pool_size, int) else pool_size
        strides = (strides, strides) if isinstance(strides, int) else strides
        padding = (padding, padding) if isinstance(padding, int) else padding

        self.use_stride = (strides[0] > 1) or (strides[1] > 1)
        # Ceil-mode emulation only matters when the pooling actually strides.
        self.ceil_mode = ceil_mode and self.use_stride
        self.use_pad = (padding[0] > 0) or (padding[1] > 0)

        if self.ceil_mode:
            # Padding is resolved dynamically in `call` from the input size.
            self.padding = padding
            self.pool_size = pool_size
            self.strides = strides
            self.data_format = data_format
        elif self.use_pad:
            # Static zero-padding, precomputed once for tf.pad.
            if is_channels_first(data_format):
                self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
            else:
                self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]

        self.pool = nn.MaxPooling2D(
            pool_size=pool_size,
            strides=strides,
            padding="valid",
            data_format=data_format)

    def call(self, x):
        if self.ceil_mode:
            x_shape = x.get_shape().as_list()
            if is_channels_first(self.data_format):
                height, width = x_shape[2], x_shape[3]
            else:
                height, width = x_shape[1], x_shape[2]
            pad_h, pad_w = self.padding
            # Grow the padding by one on each axis whose output size is fractional,
            # so that "valid" pooling reproduces ceil-mode output sizes.
            out_h = float(height + 2 * pad_h - self.pool_size[0]) / self.strides[0] + 1.0
            out_w = float(width + 2 * pad_w - self.pool_size[1]) / self.strides[1] + 1.0
            if math.ceil(out_h) > math.floor(out_h):
                pad_h += 1
            if math.ceil(out_w) > math.floor(out_w):
                pad_w += 1
            if (pad_h > 0) or (pad_w > 0):
                if is_channels_first(self.data_format):
                    paddings_tf = [[0, 0], [0, 0], [pad_h] * 2, [pad_w] * 2]
                else:
                    paddings_tf = [[0, 0], [pad_h] * 2, [pad_w] * 2, [0, 0]]
                x = tf.pad(x, paddings=paddings_tf)
        elif self.use_pad:
            x = tf.pad(x, paddings=self.paddings_tf)
        return self.pool(x)
class AvgPool2d(nn.Layer):
    """
    Average pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    pool_size : int or tuple/list of 2 int
        Size of the average pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 pool_size,
                 strides,
                 padding=0,
                 ceil_mode=False,
                 data_format="channels_last",
                 **kwargs):
        super(AvgPool2d, self).__init__(**kwargs)
        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides)
        if isinstance(padding, int):
            padding = (padding, padding)
        self.use_stride = (strides[0] > 1) or (strides[1] > 1)
        # Ceil-mode emulation only applies when the pooling actually strides.
        self.ceil_mode = ceil_mode and self.use_stride
        self.use_pad = (padding[0] > 0) or (padding[1] > 0)
        if self.ceil_mode:
            # Padding is resolved dynamically in `call` from the input size.
            self.padding = padding
            self.pool_size = pool_size
            self.strides = strides
            self.data_format = data_format
        elif self.use_pad:
            # Static zero-padding, precomputed once for tf.pad.
            if is_channels_first(data_format):
                self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
            else:
                self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]
        # Pooling is split in two stages: window averaging with stride 1,
        # then subsampling via a 1x1 average pool with the requested strides.
        self.pool = nn.AveragePooling2D(
            pool_size=pool_size,
            strides=1,
            padding="valid",
            data_format=data_format,
            name="pool")
        if self.use_stride:
            self.stride_pool = nn.AveragePooling2D(
                pool_size=1,
                strides=strides,
                padding="valid",
                data_format=data_format,
                name="stride_pool")

    def call(self, x, training=None):
        if self.ceil_mode:
            x_shape = x.get_shape().as_list()
            if is_channels_first(self.data_format):
                height = x_shape[2]
                width = x_shape[3]
            else:
                height = x_shape[1]
                width = x_shape[2]
            padding = self.padding
            # Grow the padding by one on each axis whose output size is fractional,
            # so that "valid" pooling reproduces ceil-mode output sizes.
            out_height = float(height + 2 * padding[0] - self.pool_size[0]) / self.strides[0] + 1.0
            out_width = float(width + 2 * padding[1] - self.pool_size[1]) / self.strides[1] + 1.0
            if math.ceil(out_height) > math.floor(out_height):
                padding = (padding[0] + 1, padding[1])
            if math.ceil(out_width) > math.floor(out_width):
                padding = (padding[0], padding[1] + 1)
            if (padding[0] > 0) or (padding[1] > 0):
                if is_channels_first(self.data_format):
                    paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
                else:
                    paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]
                x = tf.pad(x, paddings=paddings_tf)
        elif self.use_pad:
            x = tf.pad(x, paddings=self.paddings_tf)
        x = self.pool(x)
        if self.use_stride:
            x = self.stride_pool(x)
        return x
class GlobalAvgPool2d(nn.GlobalAvgPool2D):
    """
    Global average pooling that restores the two collapsed spatial axes as size-1 dims.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(GlobalAvgPool2d, self).__init__(data_format=data_format, **kwargs)
        self.axis = get_channel_axis(data_format)

    def call(self, x, training=None):
        y = super(GlobalAvgPool2d, self).call(x, training)
        # Re-insert the two pooled-away dimensions as singleton axes.
        y = tf.expand_dims(y, axis=self.axis)
        y = tf.expand_dims(y, axis=self.axis)
        return y
class BatchNorm(nn.BatchNormalization):
    """
    MXNet/Gluon-like batch normalization (momentum 0.9, epsilon 1e-5, channel axis
    derived from the data format).

    Parameters:
    ----------
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 momentum=0.9,
                 epsilon=1e-5,
                 data_format="channels_last",
                 **kwargs):
        channel_axis = get_channel_axis(data_format)
        super(BatchNorm, self).__init__(
            axis=channel_axis,
            momentum=momentum,
            epsilon=epsilon,
            **kwargs)
class InstanceNorm(nn.Layer):
    """
    MXNet/Gluon-like instance normalization layer as in 'Instance Normalization: The Missing Ingredient for Fast
    Stylization' (https://arxiv.org/abs/1607.08022). On the base of `tensorflow_addons` implementation.

    Implemented as group normalization with one group per channel (`groups == -1`
    is resolved to the channel count in `build`).

    Parameters:
    ----------
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    center : bool, default True
        If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored.
    scale : bool, default False
        If True, multiply by `gamma`. If False, `gamma` is not used.
    beta_initializer : str, default 'zeros'
        Initializer for the beta weight.
    gamma_initializer : str, default 'ones'
        Initializer for the gamma weight.
    beta_regularizer : object or None, default None
        Optional regularizer for the beta weight.
    gamma_regularizer : object or None, default None
        Optional regularizer for the gamma weight.
    beta_constraint : object or None, default None
        Optional constraint for the beta weight.
    gamma_constraint : object or None, default None
        Optional constraint for the gamma weight.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 epsilon=1e-5,
                 center=True,
                 scale=False,
                 beta_initializer="zeros",
                 gamma_initializer="ones",
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 data_format="channels_last",
                 **kwargs):
        super(InstanceNorm, self).__init__(**kwargs)
        self.supports_masking = True
        # -1 is a placeholder: resolved to the channel count in `build`.
        self.groups = -1
        self.axis = get_channel_axis(data_format)
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = tf.keras.initializers.get(beta_initializer)
        self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
        self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
        self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
        self.beta_constraint = tf.keras.constraints.get(beta_constraint)
        self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
        self._check_axis()

    def build(self, input_shape):
        self._check_if_input_shape_is_none(input_shape)
        self._set_number_of_groups_for_instance_norm(input_shape)
        self._check_size_of_dimensions(input_shape)
        self._create_input_spec(input_shape)
        self._add_gamma_weight(input_shape)
        self._add_beta_weight(input_shape)
        self.built = True
        super(InstanceNorm, self).build(input_shape)

    def call(self, inputs):
        input_shape = tf.keras.backend.int_shape(inputs)
        tensor_input_shape = tf.shape(inputs)
        # Split the channel axis into (groups, channels_per_group), normalize per
        # group, then restore the original shape.
        reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
        normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
        outputs = tf.reshape(normalized_inputs, tensor_input_shape)
        return outputs

    def get_config(self):
        config = {
            "groups": self.groups,
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": tf.keras.initializers.serialize(self.beta_initializer),
            "gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer),
            "beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint),
            "gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNorm, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

    def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
        """
        Reshape so the channel axis becomes (groups, channels_per_group).
        """
        group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
        group_shape[self.axis] = input_shape[self.axis] // self.groups
        group_shape.insert(self.axis, self.groups)
        group_shape = tf.stack(group_shape)
        reshaped_inputs = tf.reshape(inputs, group_shape)
        return reshaped_inputs, group_shape

    def _apply_normalization(self, reshaped_inputs, input_shape):
        """
        Normalize over all axes except batch and group.
        """
        group_shape = tf.keras.backend.int_shape(reshaped_inputs)
        group_reduction_axes = list(range(1, len(group_shape)))
        # Exclude the group axis (inserted just before/at the channel axis) from reduction.
        axis = -2 if self.axis == -1 else self.axis - 1
        group_reduction_axes.pop(axis)
        mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
        gamma, beta = self._get_reshaped_weights(input_shape)
        normalized_inputs = tf.nn.batch_normalization(
            reshaped_inputs,
            mean=mean,
            variance=variance,
            scale=gamma,
            offset=beta,
            variance_epsilon=self.epsilon)
        return normalized_inputs

    def _get_reshaped_weights(self, input_shape):
        """
        Return gamma/beta broadcast-shaped for the grouped tensor (None if unused).
        """
        broadcast_shape = self._create_broadcast_shape(input_shape)
        gamma = None
        beta = None
        if self.scale:
            gamma = tf.reshape(self.gamma, broadcast_shape)
        if self.center:
            beta = tf.reshape(self.beta, broadcast_shape)
        return gamma, beta

    def _check_if_input_shape_is_none(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError("Axis {} of input tensor should have a defined dimension but the layer received an input "
                             "with shape {}".format(self.axis, input_shape))

    def _set_number_of_groups_for_instance_norm(self, input_shape):
        # Instance norm: one group per channel.
        dim = input_shape[self.axis]
        if self.groups == -1:
            self.groups = dim

    def _check_size_of_dimensions(self, input_shape):
        dim = input_shape[self.axis]
        if dim < self.groups:
            raise ValueError("Number of groups ({}) cannot be more than the number of channels ({})".format(
                self.groups, dim))
        if (dim % self.groups) != 0:
            raise ValueError('Number of groups ({}) must be a multiple of the number of channels ({})'.format(
                self.groups, dim))

    def _check_axis(self):
        if self.axis == 0:
            raise ValueError("You are trying to normalize your batch axis. Do you want to use "
                             "tf.layer.batch_normalization instead")

    def _create_input_spec(self, input_shape):
        dim = input_shape[self.axis]
        self.input_spec = tf.keras.layers.InputSpec(
            ndim=len(input_shape),
            axes={self.axis: dim})

    def _add_gamma_weight(self, input_shape):
        dim = input_shape[self.axis]
        shape = (dim,)
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape,
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint)
        else:
            self.gamma = None

    def _add_beta_weight(self, input_shape):
        dim = input_shape[self.axis]
        shape = (dim,)
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint)
        else:
            self.beta = None

    def _create_broadcast_shape(self, input_shape):
        """
        Shape with 1s everywhere except the (groups, channels_per_group) axes.
        """
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(self.axis, self.groups)
        return broadcast_shape
class IBN(nn.Layer):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : int
        Number of channels.
    first_fraction : float, default 0.5
        The first fraction of channels for normalization.
    inst_first : bool, default True
        Whether instance normalization be on the first part of channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True,
                 data_format="channels_last",
                 **kwargs):
        super(IBN, self).__init__(**kwargs)
        self.inst_first = inst_first
        self.data_format = data_format
        # Split the channels into two parts; each part gets a different norm.
        split1_channels = int(math.floor(channels * first_fraction))
        self.split_sections = [split1_channels, channels - split1_channels]
        if self.inst_first:
            self.inst_norm = InstanceNorm(
                scale=True,
                data_format=data_format,
                name="inst_norm")
            self.batch_norm = BatchNorm(
                data_format=data_format,
                name="batch_norm")
        else:
            self.batch_norm = BatchNorm(
                data_format=data_format,
                name="batch_norm")
            self.inst_norm = InstanceNorm(
                scale=True,
                data_format=data_format,
                name="inst_norm")

    def call(self, x, training=None):
        axis = get_channel_axis(self.data_format)
        x1, x2 = tf.split(x, num_or_size_splits=self.split_sections, axis=axis)
        if self.inst_first:
            y1 = self.inst_norm(x1, training=training)
            y2 = self.batch_norm(x2, training=training)
        else:
            y1 = self.batch_norm(x1, training=training)
            y2 = self.inst_norm(x2, training=training)
        return tf.concat([y1, y2], axis=axis)
class Conv1d(nn.Layer):
    """
    Standard 1D convolution layer.

    Grouped convolution is only supported in the depthwise case
    (groups == in_channels == out_channels), implemented via a 2D depthwise
    convolution over a temporarily expanded singleton axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int, default 1
        Strides of the convolution.
    padding : int, default 0
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    force_same : bool, default False
        Whether to forcibly set `same` padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=True,
                 force_same=False,
                 data_format="channels_last",
                 **kwargs):
        super(Conv1d, self).__init__(**kwargs)
        assert (in_channels is not None)
        # `force_same` is only valid when it is equivalent to the explicit padding.
        assert (not force_same) or ((padding == kernel_size // 2) and (strides == 1) and (dilation == 1))
        self.use_conv = (groups == 1)
        self.use_dw_conv = (groups > 1) and (groups == out_channels) and (out_channels == in_channels)
        self.data_format = data_format
        self.use_pad = (padding > 0) and (not force_same)
        if self.use_pad:
            self.pad = nn.ZeroPadding1D(padding=padding)
        if self.use_conv:
            self.conv = nn.Conv1D(
                filters=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=("valid" if not force_same else "same"),
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                name="conv")
        elif self.use_dw_conv:
            # Depthwise 1D conv emulated with a (kernel_size x 1) 2D depthwise conv.
            self.dw_conv = nn.DepthwiseConv2D(
                kernel_size=(kernel_size, 1),
                strides=strides,
                padding=("valid" if not force_same else "same"),
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                name="dw_conv")

    def call(self, x):
        if self.use_pad:
            # ZeroPadding1D pads the middle axis, so temporarily go channels-last.
            if is_channels_first(self.data_format):
                x = tf.transpose(x, perm=(0, 2, 1))
            x = self.pad(x)
            if is_channels_first(self.data_format):
                x = tf.transpose(x, perm=(0, 2, 1))
        if self.use_conv:
            x = self.conv(x)
        elif self.use_dw_conv:
            # Insert a singleton spatial axis so the 2D depthwise conv applies,
            # then squeeze it back out.
            if is_channels_first(self.data_format):
                x = tf.expand_dims(x, axis=3)
            else:
                x = tf.expand_dims(x, axis=2)
            x = self.dw_conv(x)
            if is_channels_first(self.data_format):
                x = tf.squeeze(x, axis=3)
            else:
                x = tf.squeeze(x, axis=2)
        return x
class Conv2d(nn.Layer):
    """
    Standard convolution layer.

    Supports three modes: plain convolution (groups == 1), depthwise convolution
    (groups == in_channels == out_channels), and a generic grouped convolution
    emulated by splitting channels across `groups` independent Conv2D layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    force_same : bool, default False
        Whether to forcibly set `same` padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=True,
                 force_same=False,
                 data_format="channels_last",
                 **kwargs):
        super(Conv2d, self).__init__(**kwargs)
        assert (in_channels is not None)
        self.data_format = data_format
        self.use_conv = (groups == 1)
        self.use_dw_conv = (groups > 1) and (groups == out_channels) and (out_channels == in_channels)
        # assert (strides == 1) or (dilation == 1)
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(strides, int):
            strides = (strides, strides)
        if isinstance(padding, int):
            padding = (padding, padding)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)
        self.use_pad = ((padding[0] > 0) or (padding[1] > 0)) and (not force_same)
        if self.use_pad:
            self.pad = nn.ZeroPadding2D(
                padding=padding,
                data_format=data_format)
            # if is_channels_first(data_format):
            #     self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
            # else:
            #     self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
        # self.use_post_pad = (dilation[0] > 1) and (dilation[0] % 2 == 1) and (dilation[0] == dilation[1]) and\
        #                     (dilation[0] == padding[1]) and (padding[0] == padding[1])
        # if self.use_post_pad:
        #     self.post_pad = nn.ZeroPadding2D(
        #         padding=((1, 0), (1, 0)),
        #         data_format=data_format)
        # `force_same` is only valid when it is equivalent to the explicit padding.
        assert (not force_same) or ((padding[0] == kernel_size[0] // 2) and (padding[1] == kernel_size[1] // 2) and
                                    (strides[0] == 1) and (strides[0] == strides[1]) and (dilation[0] == 1) and
                                    (dilation[0] == dilation[1]))
        if self.use_conv:
            self.conv = nn.Conv2D(
                filters=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=("valid" if not force_same else "same"),
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                name="conv")
        elif self.use_dw_conv:
            # assert (dilation[0] == 1) and (dilation[1] == 1)
            self.dw_conv = nn.DepthwiseConv2D(
                kernel_size=kernel_size,
                strides=strides,
                padding=("valid" if not force_same else "same"),
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                name="dw_conv")
        else:
            # Generic grouped convolution: one independent Conv2D per group.
            # NOTE(review): this branch always uses padding="valid" (no force_same).
            assert (groups > 1)
            assert (in_channels % groups == 0)
            assert (out_channels % groups == 0)
            self.groups = groups
            self.convs = []
            for i in range(groups):
                self.convs.append(nn.Conv2D(
                    filters=(out_channels // groups),
                    kernel_size=kernel_size,
                    strides=strides,
                    padding="valid",
                    data_format=data_format,
                    dilation_rate=dilation,
                    use_bias=use_bias,
                    name="convgroup{}".format(i + 1)))

    def call(self, x):
        if self.use_pad:
            x = self.pad(x)
            # x = tf.pad(x, paddings=self.paddings_tf)
        if self.use_conv:
            try:
                x = self.conv(x)
            except tf.errors.InvalidArgumentError as ex:
                # Workaround: some dilated-conv configurations fail at runtime;
                # rebuild an equivalent Conv2D, copy the weights over, and retry.
                if self.conv.dilation_rate != (1, 1):
                    conv_ = nn.Conv2D(
                        filters=self.conv.filters,
                        kernel_size=self.conv.kernel_size,
                        strides=self.conv.strides,
                        padding="valid",
                        data_format=self.data_format,
                        dilation_rate=self.conv.dilation_rate,
                        use_bias=self.conv.use_bias,
                        name="conv_")
                    # Build the replacement layer by calling it once, then copy weights.
                    _ = conv_(x)
                    conv_.weights[0].assign(self.conv.weights[0])
                    if len(self.conv.weights) > 1:
                        conv_.weights[1].assign(self.conv.weights[1])
                    x = conv_(x)
                else:
                    raise ex
            # x = self.conv(x)
        elif self.use_dw_conv:
            x = self.dw_conv(x)
        else:
            # Split input channels into groups, convolve each, and re-concatenate.
            yy = []
            xx = tf.split(x, num_or_size_splits=self.groups, axis=get_channel_axis(self.data_format))
            for xi, convi in zip(xx, self.convs):
                yy.append(convi(xi))
            x = tf.concat(yy, axis=get_channel_axis(self.data_format))
        # if self.use_post_pad:
        #     x = self.post_pad(x)
        return x
class SelectableDense(nn.Layer):
    """
    Selectable dense layer: holds `num_options` weight matrices and, per sample,
    applies the one chosen by an index tensor.

    NOTE(review): relies on `initializers` and `tensor_shape` aliases imported at
    the top of this file -- verify (not visible in this chunk).

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    weight_initializer : str or `Initializer`, default 'glorot_uniform'
        Initializer for the `kernel` weights matrix.
    bias_initializer: str or `Initializer`
        Initializer for the bias vector.
    num_options : int, default 1
        Number of selectable options.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 weight_initializer="glorot_uniform",
                 bias_initializer="zeros",
                 num_options=1,
                 **kwargs):
        super(SelectableDense, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_bias = use_bias
        self.num_options = num_options
        self.weight_initializer = initializers.get(weight_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.supports_masking = True
        self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)

    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        last_dim = tensor_shape.dimension_value(input_shape[-1])
        self.input_spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: last_dim})
        # One (out_channels x in_channels) matrix per selectable option.
        self.weight = self.add_weight(
            "weight",
            shape=[self.num_options, self.out_channels, self.in_channels],
            initializer=self.weight_initializer,
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(
                "bias",
                shape=[self.num_options, self.out_channels],
                initializer=self.bias_initializer,
                regularizer=None,
                constraint=None,
                dtype=self.dtype,
                trainable=True)
        else:
            self.bias = None
        self.built = True

    def call(self, x, indices):
        # Gather the per-sample weight matrix, then batched matrix-vector product.
        weight = tf.gather(self.weight.value(), indices=indices, axis=0)
        x = tf.expand_dims(x, axis=-1)
        x = tf.keras.backend.batch_dot(weight, x)
        x = tf.squeeze(x, axis=-1)
        if self.use_bias:
            bias = tf.gather(self.bias.value(), indices=indices, axis=0)
            x += bias
        return x

    def compute_output_shape(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_shape = input_shape.with_rank_at_least(2)
        return input_shape[:-1].concatenate(self.out_channels)

    def get_config(self):
        config = {
            "in_channels": self.in_channels,
            "out_channels": self.out_channels,
            "use_bias": self.use_bias,
            "num_options": self.num_options,
            "weight_initializer": initializers.serialize(self.weight_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
        }
        base_config = super(SelectableDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class DenseBlock(nn.Layer):
    """
    Standard dense block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        self.use_bn = use_bn
        self.activate = (activation is not None)
        self.fc = nn.Dense(
            units=out_channels,
            use_bias=use_bias,
            input_dim=in_channels,
            name="fc")
        if use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")

    def call(self, x, training=None):
        y = self.fc(x)
        if self.use_bn:
            y = self.bn(y, training=training)
        return self.activ(y) if self.activate else y
class ConvBlock1d(nn.Layer):
    """
    Standard 1D convolution block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    force_same : bool, default False
        Whether to forcibly set `same` padding in convolution.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 force_same=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(ConvBlock1d, self).__init__(**kwargs)
        assert (in_channels is not None)
        self.use_bn = use_bn
        self.activate = (activation is not None)
        self.conv = Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            force_same=force_same,
            data_format=data_format,
            name="conv")
        if use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")

    def call(self, x, training=None):
        y = self.conv(x)
        if self.use_bn:
            y = self.bn(y, training=training)
        return self.activ(y) if self.activate else y
def conv1x1(in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            **kwargs):
    """
    Convolution 1x1 layer (pointwise convolution).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Conv2d
        Convolution layer.
    """
    return Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        **kwargs)
def conv3x3(in_channels,
            out_channels,
            strides=1,
            padding=1,
            dilation=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            **kwargs):
    """
    Convolution 3x3 layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Conv2d
        Convolution layer.
    """
    return Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        **kwargs)
def depthwise_conv3x3(channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      use_bias=False,
                      data_format="channels_last",
                      **kwargs):
    """
    Depthwise convolution 3x3 layer (groups == in_channels == out_channels).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Conv2d
        Convolution layer.
    """
    return Conv2d(
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=channels,
        use_bias=use_bias,
        data_format=data_format,
        **kwargs)
class ConvBlock(nn.Layer):
    """
    Standard convolution block: convolution, then optional Batch normalization, then optional activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    force_same : bool, default False
        Whether to forcibly set `same` padding in convolution.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function (None disables the activation stage).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 force_same=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        assert (in_channels is not None)
        # Record which optional stages are enabled.
        self.use_bn = use_bn
        self.activate = (activation is not None)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            force_same=force_same,
            data_format=data_format,
            name="conv")
        if self.use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")
    def call(self, x, training=None):
        # conv -> (bn) -> (activation)
        y = self.conv(x)
        if self.use_bn:
            y = self.bn(y, training=training)
        return self.activ(y) if self.activate else y
def conv1x1_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=0,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    1x1 version of the standard convolution block (pointwise convolution + BN + activation).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 1,
        "strides": strides,
        "padding": padding,
        "groups": groups,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return ConvBlock(**block_args, **kwargs)
def conv3x3_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    3x3 version of the standard convolution block (conv + BN + activation).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "groups": groups,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return ConvBlock(**block_args, **kwargs)
def conv5x5_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    5x5 version of the standard convolution block (conv + BN + activation).
    Extra ConvBlock options (e.g. `use_bn`) can be forwarded through **kwargs.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 5,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "groups": groups,
        "use_bias": use_bias,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return ConvBlock(**block_args, **kwargs)
def conv7x7_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    7x7 version of the standard convolution block (conv + BN + activation).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 7,
        "strides": strides,
        "padding": padding,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return ConvBlock(**block_args, **kwargs)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
    """
    Depthwise version of the standard convolution block (groups fixed to the output channel count).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": kernel_size,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        # Depthwise behavior: one convolution group per output channel.
        "groups": out_channels,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return ConvBlock(**block_args, **kwargs)
def dwconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    bn_eps=1e-5,
                    activation="relu",
                    data_format="channels_last",
                    **kwargs):
    """
    3x3 depthwise version of the standard convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "use_bias": use_bias,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return dwconv_block(**block_args, **kwargs)
def dwconv5x5_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    bn_eps=1e-5,
                    activation="relu",
                    data_format="channels_last",
                    **kwargs):
    """
    5x5 depthwise version of the standard convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 5,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "use_bias": use_bias,
        "bn_eps": bn_eps,
        "activation": activation,
        "data_format": data_format,
    }
    return dwconv_block(**block_args, **kwargs)
class DwsConvBlock(nn.Layer):
    """
    Depthwise separable convolution block: a depthwise conv block followed by a pointwise (1x1)
    conv block, each with its own BatchNorm and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_force_same : bool, default False
        Whether to forcibly set `same` padding in depthwise convolution block.
    pw_force_same : bool, default False
        Whether to forcibly set `same` padding in pointwise convolution block.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 dw_force_same=False,
                 pw_force_same=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation="relu",
                 pw_activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(DwsConvBlock, self).__init__(**kwargs)
        # Spatial filtering per channel; channel count is unchanged here.
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            use_bias=use_bias,
            force_same=dw_force_same,
            use_bn=dw_use_bn,
            bn_eps=bn_eps,
            activation=dw_activation,
            data_format=data_format,
            name="dw_conv")
        # 1x1 projection to the requested output channel count.
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            force_same=pw_force_same,
            use_bn=pw_use_bn,
            bn_eps=bn_eps,
            activation=pw_activation,
            data_format=data_format,
            name="pw_conv")
    def call(self, x, training=None):
        # depthwise stage, then pointwise stage
        return self.pw_conv(self.dw_conv(x, training=training), training=training)
def dwsconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     bn_eps=1e-5,
                     dw_activation="relu",
                     pw_activation="relu",
                     data_format="channels_last",
                     **kwargs):
    """
    3x3 depthwise separable version of the standard convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layers use a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "use_bias": use_bias,
        "bn_eps": bn_eps,
        "dw_activation": dw_activation,
        "pw_activation": pw_activation,
        "data_format": data_format,
    }
    return DwsConvBlock(**block_args, **kwargs)
class PreConvBlock(nn.Layer):
    """
    Convolution block with Batch normalization and ReLU pre-activation
    (BN -> ReLU -> conv, as used by PreResNet-style networks).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor (used by PreResNet identity branches).
    activate : bool, default True
        Whether to apply the ReLU activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 return_preact=False,
                 activate=True,
                 data_format="channels_last",
                 **kwargs):
        super(PreConvBlock, self).__init__(**kwargs)
        self.return_preact = return_preact
        self.activate = activate
        self.use_bn = use_bn
        if self.use_bn:
            self.bn = BatchNorm(
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = nn.ReLU()
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        # Pre-activation: normalize and activate the input before convolving.
        y = self.bn(x, training=training) if self.use_bn else x
        if self.activate:
            y = self.activ(y)
        out = self.conv(y)
        if self.return_preact:
            # `y` is the pre-activation tensor the caller may reuse.
            return out, y
        return out
def pre_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      use_bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True,
                      data_format="channels_last",
                      **kwargs):
    """
    1x1 version of the pre-activated convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    activate : bool, default True
        Whether to apply the activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 1,
        "strides": strides,
        "padding": 0,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "return_preact": return_preact,
        "activate": activate,
        "data_format": data_format,
    }
    return PreConvBlock(**block_args, **kwargs)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      groups=1,
                      use_bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True,
                      data_format="channels_last",
                      **kwargs):
    """
    3x3 version of the pre-activated convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    activate : bool, default True
        Whether to apply the activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    block_args = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "groups": groups,
        "use_bias": use_bias,
        "use_bn": use_bn,
        "return_preact": return_preact,
        "activate": activate,
        "data_format": data_format,
    }
    return PreConvBlock(**block_args, **kwargs)
class Deconv2d(nn.Layer):
    """
    Standard deconvolution layer. Padding is emulated by running a valid-padded
    transposed convolution and then cropping the result.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    out_padding : int or tuple/list of 2 int, default 0
        Output padding value for deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer (only 1 is supported).
    groups : int, default 1
        Number of groups (only 1 is supported).
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 out_padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=True,
                 data_format="channels_last",
                 **kwargs):
        super(Deconv2d, self).__init__(**kwargs)
        # Only the plain (non-dilated, ungrouped) case is implemented.
        assert (dilation == 1)
        assert (groups == 1)
        assert (in_channels is not None)
        if isinstance(padding, int):
            padding = (padding, padding)
        # Cropping is only needed when some padding was requested.
        self.use_crop = (padding[0] > 0) or (padding[1] > 0)
        if self.use_crop:
            self.crop = nn.Cropping2D(
                cropping=padding,
                data_format=data_format,
                name="crop")
        self.conv = nn.Conv2DTranspose(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding="valid",
            output_padding=out_padding,
            data_format=data_format,
            dilation_rate=dilation,
            use_bias=use_bias,
            name="conv")
    def call(self, x):
        y = self.conv(x)
        return self.crop(y) if self.use_crop else y
class DeconvBlock(nn.Layer):
    """
    Deconvolution block: transposed convolution, then optional Batch normalization,
    then optional activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the deconvolution.
    padding : int or tuple/list of 2 int
        Padding value for deconvolution layer.
    out_padding : int or tuple/list of 2 int, default 0
        Output padding value for deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for deconvolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function (None disables the activation stage).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 out_padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(DeconvBlock, self).__init__(**kwargs)
        assert (in_channels is not None)
        # Record which optional stages are enabled.
        self.use_bn = use_bn
        self.activate = (activation is not None)
        self.conv = Deconv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            out_padding=out_padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="conv")
        if self.use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")
    def call(self, x, training=None):
        # deconv -> (bn) -> (activation)
        y = self.conv(x)
        if self.use_bn:
            y = self.bn(y, training=training)
        return self.activ(y) if self.activate else y
def channel_shuffle(x,
                    groups,
                    data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Splits the channel dimension into `groups` blocks and interleaves them.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups.
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x_shape = x.get_shape().as_list()
    if is_channels_first(data_format):
        channels, height, width = x_shape[1:4]
    else:
        height, width, channels = x_shape[1:4]
    assert (channels % groups == 0)
    channels_per_group = channels // groups
    # Reshape channels into (groups, channels_per_group), swap those two axes,
    # then flatten back — this interleaves the channel groups.
    if is_channels_first(data_format):
        x = tf.reshape(x, shape=(-1, groups, channels_per_group, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        return tf.reshape(x, shape=(-1, channels, height, width))
    x = tf.reshape(x, shape=(-1, height, width, groups, channels_per_group))
    x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
    return tf.reshape(x, shape=(-1, height, width, channels))
class ChannelShuffle(nn.Layer):
    """
    Channel shuffle layer: a stateless wrapper over `channel_shuffle` that
    remembers the group count and data format.
    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 groups,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelShuffle, self).__init__(**kwargs)
        # Channels must divide evenly into the requested groups.
        assert (channels % groups == 0)
        self.groups = groups
        self.data_format = data_format
    def call(self, x):
        # Delegate to the functional implementation.
        return channel_shuffle(x, groups=self.groups, data_format=self.data_format)
def channel_shuffle2(x,
                     channels_per_group,
                     data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    The alternative version: parameterized by the group size instead of the group count.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels_per_group : int
        Number of channels per group.
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x_shape = x.get_shape().as_list()
    if is_channels_first(data_format):
        channels = x_shape[1]
        height = x_shape[2]
        width = x_shape[3]
    else:
        height = x_shape[1]
        width = x_shape[2]
        channels = x_shape[3]
    assert (channels % channels_per_group == 0)
    # Derive the group count from the fixed group size.
    groups = channels // channels_per_group
    # Note the reshape order is (channels_per_group, groups) — the inverse
    # factorization of `channel_shuffle`.
    if is_channels_first(data_format):
        x = tf.reshape(x, shape=(-1, channels_per_group, groups, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        x = tf.reshape(x, shape=(-1, channels, height, width))
    else:
        x = tf.reshape(x, shape=(-1, height, width, channels_per_group, groups))
        x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
        x = tf.reshape(x, shape=(-1, height, width, channels))
    return x
class ChannelShuffle2(nn.Layer):
    """
    Channel shuffle layer (alternative version): a stateless wrapper over
    `channel_shuffle2` that stores the per-group channel count.
    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 groups,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelShuffle2, self).__init__(**kwargs)
        # Channels must divide evenly; store the group size rather than the count.
        assert (channels % groups == 0)
        self.channels_per_group = channels // groups
        self.data_format = data_format
    def call(self, x):
        # Delegate to the functional implementation.
        return channel_shuffle2(x, channels_per_group=self.channels_per_group, data_format=self.data_format)
class SEBlock(nn.Layer):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Recalibrates channels by multiplying the input with learned per-channel weights.
    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    mid_channels : int or None, default None
        Number of middle channels (overrides `reduction` when given).
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    mid_activation : function, or str, or nn.Layer, default 'relu'
        Activation function after the first convolution.
    out_activation : function, or str, or nn.Layer, default 'sigmoid'
        Activation function after the last convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 mid_channels=None,
                 round_mid=False,
                 use_conv=True,
                 mid_activation="relu",
                 out_activation="sigmoid",
                 data_format="channels_last",
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        self.use_conv = use_conv
        self.data_format = data_format
        # Bottleneck width: channels / reduction, optionally rounded to a
        # hardware-friendly multiple.
        if mid_channels is None:
            mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        # "Squeeze": global spatial average pooling to one value per channel.
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        if use_conv:
            self.conv1 = conv1x1(
                in_channels=channels,
                out_channels=mid_channels,
                use_bias=True,
                data_format=data_format,
                name="conv1")
        else:
            self.fc1 = nn.Dense(
                units=mid_channels,
                input_dim=channels,
                name="fc1")
        self.activ = get_activation_layer(mid_activation, name="activ")
        if use_conv:
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=channels,
                use_bias=True,
                data_format=data_format,
                name="conv2")
        else:
            self.fc2 = nn.Dense(
                units=channels,
                input_dim=mid_channels,
                name="fc2")
        self.sigmoid = get_activation_layer(out_activation, name="sigmoid")
    def call(self, x, training=None):
        # Squeeze: (batch, C) per-channel descriptor.
        w = self.pool(x)
        if self.use_conv:
            # Restore two spatial singleton dims so the 1x1 convs accept w:
            # channels_first -> (batch, C, 1, 1); channels_last -> (batch, 1, 1, C).
            axis = -1 if is_channels_first(self.data_format) else 1
            w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis)
        # Excitation: bottleneck -> activation -> expand -> gate.
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # The FC path produced (batch, C); add singleton spatial dims so the
            # gate broadcasts over the input feature map.
            axis = -1 if is_channels_first(self.data_format) else 1
            w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis)
        # Scale: reweight input channels by the learned gates.
        x = x * w
        return x
class SABlock(nn.Layer):
    """
    Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Expects an input whose channel dimension holds `radix` splits (i.e. out_channels * radix
    channels) and produces an attention-weighted sum over the splits with out_channels channels.
    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    groups : int
        Number of channel groups (cardinality, without radix).
    radix : int
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 out_channels,
                 groups,
                 radix,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 bn_eps=1e-5,
                 data_format="channels_last",
                 **kwargs):
        super(SABlock, self).__init__(**kwargs)
        self.groups = groups
        self.radix = radix
        self.use_conv = use_conv
        self.data_format = data_format
        self.axis = get_channel_axis(data_format)
        # The incoming tensor carries all radix splits concatenated on channels.
        in_channels = out_channels * radix
        # Squeeze bottleneck width, clamped from below by min_channels.
        mid_channels = max(in_channels // reduction, min_channels)
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        if use_conv:
            self.conv1 = conv1x1(
                in_channels=out_channels,
                out_channels=mid_channels,
                use_bias=True,
                data_format=data_format,
                name="conv1")
        else:
            self.fc1 = nn.Dense(
                units=mid_channels,
                input_dim=out_channels,
                name="fc1")
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        if use_conv:
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=in_channels,
                use_bias=True,
                data_format=data_format,
                name="conv2")
        else:
            self.fc2 = nn.Dense(
                units=in_channels,
                input_dim=mid_channels,
                name="fc2")
        # Softmax over the radix axis (axis 1 after the transpose in `call`).
        self.softmax = nn.Softmax(axis=1)
    def call(self, x, training=None):
        x_shape = x.get_shape().as_list()
        # batch = x_shape[0]
        # Separate the radix splits into their own axis, then sum across them
        # to get the "gap" feature used to compute the attention logits.
        if is_channels_first(self.data_format):
            channels = x_shape[1]
            height = x_shape[2]
            width = x_shape[3]
            x = tf.reshape(x, shape=(-1, self.radix, channels // self.radix, height, width))
            w = tf.math.reduce_sum(x, axis=1)
        else:
            height = x_shape[1]
            width = x_shape[2]
            channels = x_shape[3]
            x = tf.reshape(x, shape=(-1, height, width, self.radix, channels // self.radix))
            w = tf.math.reduce_sum(x, axis=-2)
        # Global average pool -> (batch, out_channels).
        w = self.pool(w)
        if self.use_conv:
            # Add singleton spatial dims so the 1x1 convolutions accept w.
            axis = -1 if is_channels_first(self.data_format) else 1
            w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis)
        # Bottleneck MLP producing radix * out_channels attention logits.
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.bn(w, training=training)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        # Arrange logits as (batch, radix, groups, channels-per-group-per-radix)
        # so the softmax normalizes across the radix splits.
        w = tf.reshape(w, shape=(-1, self.groups, self.radix, channels // self.groups // self.radix))
        w = tf.transpose(w, perm=(0, 2, 1, 3))
        w = self.softmax(w)
        # Broadcast the attention weights over the spatial dimensions.
        if is_channels_first(self.data_format):
            w = tf.reshape(w, shape=(-1, self.radix, channels // self.radix, 1, 1))
        else:
            w = tf.reshape(w, shape=(-1, 1, 1, self.radix, channels // self.radix))
        # Weighted sum over the radix splits collapses back to out_channels.
        x = x * w
        if is_channels_first(self.data_format):
            x = tf.math.reduce_sum(x, axis=1)
        else:
            x = tf.math.reduce_sum(x, axis=-2)
        return x
class SAConvBlock(nn.Layer):
    """
    Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955. A grouped convolution producing radix splits,
    followed by a split-attention aggregation back to `out_channels`.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    force_same : bool, default False
        Whether to forcibly set `same` padding in convolution.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    radix : int, default 2
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 force_same=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 radix=2,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 data_format="channels_last",
                 **kwargs):
        super(SAConvBlock, self).__init__(**kwargs)
        # Both the output channels and the groups are scaled by radix so that
        # each cardinal group produces `radix` feature-map splits.
        self.conv = ConvBlock(
            in_channels=in_channels,
            out_channels=(out_channels * radix),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=(groups * radix),
            use_bias=use_bias,
            force_same=force_same,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv")
        # Split-attention aggregation collapses the radix splits back down.
        self.att = SABlock(
            out_channels=out_channels,
            groups=groups,
            radix=radix,
            reduction=reduction,
            min_channels=min_channels,
            use_conv=use_conv,
            bn_eps=bn_eps,
            data_format=data_format,
            name="att")
    def call(self, x, training=None):
        # conv (radix-expanded) -> split attention
        return self.att(self.conv(x, training=training), training=training)
def saconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    **kwargs):
    """
    3x3 version of the Split-Attention convolution block.

    Any extra keyword arguments are forwarded to `SAConvBlock` unchanged.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    """
    # Thin factory: fixes the kernel size at 3x3 and delegates everything else.
    return SAConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        **kwargs)
class PixelShuffle(nn.Layer):
    """
    Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel
    Convolutional Neural Network,' https://arxiv.org/abs/1609.05158.

    Rearranges channels into spatial resolution: (C*r*r, H, W) -> (C, H*r, W*r).

    Parameters:
    ----------
    scale_factor : int
        Multiplier for spatial size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PixelShuffle, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.data_format = data_format

    def call(self, x, training=None):
        if not tf.executing_eagerly():
            # Graph mode: fall back to the depth_to_space-based implementation.
            return PixelShuffle2(scale_factor=self.scale_factor, data_format=self.data_format)(x)
        factor = self.scale_factor
        in_shape = x.get_shape().as_list()
        if is_channels_first(self.data_format):
            channels, height, width = in_shape[1], in_shape[2], in_shape[3]
        else:
            height, width, channels = in_shape[1], in_shape[2], in_shape[3]
        assert (channels % factor % factor == 0)
        new_channels = channels // factor // factor
        # Split the channel axis into (new_channels, factor, factor), interleave the two
        # factor axes with the spatial axes, then fold them into height and width.
        if is_channels_first(self.data_format):
            x = tf.reshape(x, shape=(-1, new_channels, factor, factor, height, width))
            x = tf.transpose(x, perm=(0, 1, 4, 2, 5, 3))
            x = tf.reshape(x, shape=(-1, new_channels, height * factor, width * factor))
        else:
            x = tf.reshape(x, shape=(-1, height, width, new_channels, factor, factor))
            x = tf.transpose(x, perm=(0, 1, 4, 2, 5, 3))
            x = tf.reshape(x, shape=(-1, height * factor, width * factor, new_channels))
        return x
class PixelShuffle2(nn.Layer):
    """
    Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel
    Convolutional Neural Network,' https://arxiv.org/abs/1609.05158. Alternative implementation.

    Parameters:
    ----------
    scale_factor : int
        Multiplier for spatial size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PixelShuffle2, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.data_format = data_format

    def call(self, x, training=None):
        # Delegate the rearrangement to the native depth-to-space kernel.
        layout = "NCHW" if is_channels_first(self.data_format) else "NHWC"
        return tf.nn.depth_to_space(input=x, block_size=self.scale_factor, data_format=layout)
class DucBlock(nn.Layer):
    """
    Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,'
    https://arxiv.org/abs/1702.08502.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(DucBlock, self).__init__(**kwargs)
        # Expand channels by scale_factor^2 so pixel-shuffle can trade them for spatial resolution.
        mid_channels = (scale_factor * scale_factor) * out_channels
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv")
        self.pix_shuffle = PixelShuffle(
            scale_factor=scale_factor,
            data_format=data_format,
            name="pix_shuffle")

    def call(self, x, training=None):
        y = self.conv(x, training=training)
        return self.pix_shuffle(y)
class Identity(nn.Layer):
    """
    Identity layer: returns its input unchanged.
    """
    def __init__(self,
                 **kwargs):
        super(Identity, self).__init__(**kwargs)

    def call(self, x, training=None):
        # Deliberate no-op; useful as a placeholder branch.
        return x
class SimpleSequential(nn.Layer):
    """
    A sequential layer that can be used instead of tf.keras.Sequential.
    Children are applied one after another, in the order they were added.
    """
    def __init__(self,
                 **kwargs):
        super(SimpleSequential, self).__init__(**kwargs)
        # Ordered list of child layers.
        self.children = []

    def __getitem__(self, i):
        return self.children[i]

    def __len__(self):
        return len(self.children)

    def add(self, layer):
        # Prefix the child's name with this container's name to keep weight names unique.
        layer._name = "{}/{}".format(self.name, layer._name)
        self.children.append(layer)

    def call(self, x, training=None):
        for child in self.children:
            x = child(x, training=training)
        return x
class ParametricSequential(SimpleSequential):
    """
    A sequential container for layers with parameters.
    Layers will be executed in the order they are added.
    """
    def __init__(self,
                 **kwargs):
        super(ParametricSequential, self).__init__(**kwargs)

    def call(self, x, **kwargs):
        # Forward arbitrary keyword arguments to every child.
        for child in self.children:
            x = child(x, **kwargs)
        return x
class DualPathSequential(SimpleSequential):
    """
    A sequential container for layers with dual inputs/outputs.
    Layers will be executed in the order they are added.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first layers with single input/output.
    last_ordinals : int, default 0
        Number of the final layers with single input/output.
    dual_path_scheme : function
        Scheme of dual path response for a layer.
    dual_path_scheme_ordinal : function
        Scheme of dual path response for an ordinal layer.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda block, x1, x2, training: block(x1, x2, training)),
                 dual_path_scheme_ordinal=(lambda block, x1, x2, training: (block(x1, training), x2)),
                 **kwargs):
        super(DualPathSequential, self).__init__(**kwargs)
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def call(self, x1, x2=None, training=None):
        count = len(self.children)
        for idx, child in enumerate(self.children):
            # The leading `first_ordinals` and trailing `last_ordinals` children take a single
            # input; every child in between runs through the dual-path scheme.
            is_ordinal = (idx < self.first_ordinals) or (idx >= count - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(child, x1, x2, training)
        return (x1, x2) if self.return_two else x1
class Concurrent(SimpleSequential):
    """
    A container for concatenation of layers.
    All children receive the same input; their outputs are merged.

    Parameters:
    ----------
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    merge_type : str, default None
        Type of branch merging.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 stack=False,
                 merge_type=None,
                 data_format="channels_last",
                 **kwargs):
        super(Concurrent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"])
        self.axis = get_channel_axis(data_format)
        # An explicit merge_type wins; otherwise derive it from the legacy `stack` flag.
        self.merge_type = merge_type if merge_type is not None else ("stack" if stack else "cat")

    def call(self, x, training=None):
        branches = [child(x, training=training) for child in self.children]
        if self.merge_type == "stack":
            return tf.stack(branches, axis=self.axis)
        if self.merge_type == "cat":
            return tf.concat(branches, axis=self.axis)
        if self.merge_type == "sum":
            return tf.math.reduce_sum(tf.stack(branches, axis=self.axis), axis=self.axis)
        raise NotImplementedError()
class SequentialConcurrent(SimpleSequential):
    """
    A sequential container with concatenated outputs.
    Blocks will be executed in the order they are added; every intermediate output is kept and merged.

    Parameters:
    ----------
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    cat_input : bool, default True
        Whether to concatenate input tensor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 stack=False,
                 cat_input=True,
                 data_format="channels_last",
                 **kwargs):
        super(SequentialConcurrent, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        self.stack = stack
        self.cat_input = cat_input

    def call(self, x, training=None):
        # Optionally seed the collected outputs with the raw input.
        collected = [x] if self.cat_input else []
        for child in self.children:
            x = child(x, training=training)
            collected.append(x)
        merge = tf.stack if self.stack else tf.concat
        return merge(collected, axis=self.axis)
class ParametricConcurrent(SimpleSequential):
    """
    A container for concatenation of layers with parameters.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(ParametricConcurrent, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)

    def call(self, x, **kwargs):
        # Every branch sees the same input and the same keyword arguments.
        branches = [child(x, **kwargs) for child in self.children]
        return tf.concat(branches, axis=self.axis)
class MultiOutputSequential(SimpleSequential):
    """
    A sequential container with multiple outputs.
    Layers will be executed in the order they are added.

    Child layers may opt into output collection: a truthy `do_output` attribute makes the child's
    output collected; a truthy `do_output2` attribute means the child returns a tuple
    `(x, extra_outputs)` — the extras are collected and `x` continues through the chain.

    Parameters:
    ----------
    multi_output : bool, default True
        Whether to return multiple output.
    dual_output : bool, default False
        Whether to return dual output.
    return_last : bool, default True
        Whether to forcibly return last value.
    """
    def __init__(self,
                 multi_output=True,
                 dual_output=False,
                 return_last=True,
                 **kwargs):
        super(MultiOutputSequential, self).__init__(**kwargs)
        self.multi_output = multi_output
        self.dual_output = dual_output
        self.return_last = return_last

    def call(self, x, **kwargs):
        outs = []
        for block in self.children:
            x = block(x, **kwargs)
            # getattr with a False default replaces the hasattr+lookup double access.
            if getattr(block, "do_output", False):
                outs.append(x)
            elif getattr(block, "do_output2", False):
                # isinstance (not `type(x) == tuple`) is the idiomatic type check.
                assert isinstance(x, tuple)
                outs.extend(x[1])
                x = x[0]
        if self.multi_output:
            return [x] + outs if self.return_last else outs
        elif self.dual_output:
            return x, outs
        else:
            return x
class ParallelConcurent(SimpleSequential):
    """
    A sequential container with multiple inputs and single/multiple outputs.
    Modules will be executed in the order they are added; the i-th child consumes the i-th input.

    Parameters:
    ----------
    merge_type : str, default None
        Type of branch merging.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 merge_type=None,
                 data_format="channels_last",
                 **kwargs):
        super(ParallelConcurent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = get_channel_axis(data_format)
        self.merge_type = merge_type

    def call(self, x, training=None):
        outs = [child(xi, training=training) for child, xi in zip(self.children, x)]
        if self.merge_type == "list":
            return outs
        if self.merge_type == "stack":
            return tf.stack(outs, axis=self.axis)
        if self.merge_type == "cat":
            return tf.concat(outs, axis=self.axis)
        if self.merge_type == "sum":
            return tf.math.reduce_sum(tf.stack(outs, axis=self.axis), axis=self.axis)
        raise NotImplementedError()
class DualPathParallelConcurent(SimpleSequential):
    """
    A sequential container with multiple dual-path inputs and single/multiple outputs.
    Modules will be executed in the order they are added; the i-th child consumes the i-th pair.

    Parameters:
    ----------
    merge_type : str, default 'list'
        Type of branch merging.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 merge_type="list",
                 data_format="channels_last",
                 **kwargs):
        super(DualPathParallelConcurent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = get_channel_axis(data_format)
        self.merge_type = merge_type

    def call(self, x1, x2, training=None):
        y1 = []
        y2 = []
        for child, a, b in zip(self.children, x1, x2):
            ra, rb = child(a, b, training=training)
            y1.append(ra)
            y2.append(rb)
        if self.merge_type == "list":
            return y1, y2
        if self.merge_type == "stack":
            return tf.stack(y1, axis=self.axis), tf.stack(y2, axis=self.axis)
        if self.merge_type == "cat":
            return tf.concat(y1, axis=self.axis), tf.concat(y2, axis=self.axis)
        if self.merge_type == "sum":
            return (tf.math.reduce_sum(tf.stack(y1, axis=self.axis), axis=self.axis),
                    tf.math.reduce_sum(tf.stack(y2, axis=self.axis), axis=self.axis))
        raise NotImplementedError()
class NormActivation(nn.Layer):
    """
    Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(NormActivation, self).__init__(**kwargs)
        assert (in_channels is not None)
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")
        self.activ = get_activation_layer(activation, name="activ")

    def call(self, x, training=None):
        # Normalize first, then activate.
        return self.activ(self.bn(x, training=training))
class InterpolationBlock(nn.Layer):
    """
    Bilinear interpolation block.

    Parameters:
    ----------
    scale_factor : int, default 1
        Multiplier for spatial size.
    out_size : tuple of 2 int, default None
        Spatial size of the output tensor for the interpolation operation.
    up : bool, default True
        Whether to upsample or downsample.
    interpolation : str, default 'bilinear'
        Interpolation mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 scale_factor=1,
                 out_size=None,
                 up=True,
                 interpolation="bilinear",
                 data_format="channels_last",
                 **kwargs):
        super(InterpolationBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.out_size = out_size
        self.up = up
        self.data_format = data_format
        if interpolation == "bilinear":
            self.method = tf.image.ResizeMethod.BILINEAR
        else:
            self.method = tf.image.ResizeMethod.NEAREST_NEIGHBOR

    def call(self, x, size=None, training=None):
        target = size if size is not None else self.calc_out_size(x)
        channels_first = is_channels_first(self.data_format)
        if channels_first:
            # tf.image.resize works on NHWC tensors.
            x = tf.transpose(x, perm=[0, 2, 3, 1])
        x = tf.image.resize(
            images=x,
            size=target,
            method=self.method)
        if channels_first:
            x = tf.transpose(x, perm=[0, 3, 1, 2])
        return x

    def calc_out_size(self, x):
        # Explicit out_size overrides scale-based computation.
        if self.out_size is not None:
            return self.out_size
        in_size = get_im_size(x, data_format=self.data_format)
        if self.up:
            return tuple(s * self.scale_factor for s in in_size)
        return tuple(s // self.scale_factor for s in in_size)
class Hourglass(nn.Layer):
    """
    A hourglass block.
    Parameters:
    ----------
    down_seq : nn.HybridSequential
        Down modules as sequential.
    up_seq : nn.HybridSequential
        Up modules as sequential.
    skip_seq : nn.HybridSequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs.
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False,
                 data_format="channels_last",
                 **kwargs):
        super(Hourglass, self).__init__(**kwargs)
        # Depth is defined by the number of downsampling stages; up and skip sequences must match it
        # (skip_seq may hold one extra module applied at the bottleneck).
        self.depth = len(down_seq)
        assert (merge_type in ["cat", "add"])
        assert (len(up_seq) == self.depth)
        assert (len(skip_seq) in (self.depth, self.depth + 1))
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.extra_skip = (len(skip_seq) == self.depth + 1)
        self.axis = get_channel_axis(data_format)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq

    def _merge(self, x, y):
        # Fuse the up-path tensor with a skip-connection tensor, by concatenation or addition.
        if y is not None:
            if self.merge_type == "cat":
                x = tf.concat([x, y], axis=self.axis)
            elif self.merge_type == "add":
                x = x + y
        return x

    def call(self, x, training=None):
        y = None
        # Downsampling pass: keep the input plus every intermediate resolution for the skip merges.
        down_outs = [x]
        for down_module in self.down_seq.children:
            x = down_module(x, training=training)
            down_outs.append(x)
        # Upsampling pass: at step i, merge the skip branch taken from resolution (depth - i),
        # then apply the matching up module (except after the final merge).
        for i in range(len(down_outs)):
            if i != 0:
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y, training=training)
                x = self._merge(x, y)
            if i != len(down_outs) - 1:
                if (i == 0) and self.extra_skip:
                    # Optional extra skip module applied at the bottleneck before the first up step.
                    skip_module = self.skip_seq[self.depth]
                    x = skip_module(x, training=training)
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x, training=training)
        if self.return_first_skip:
            # `y` holds the output of the last processed skip branch (the highest-resolution one).
            return x, y
        else:
            return x
class HeatmapMaxDetBlock(nn.Layer):
    """
    Heatmap maximum detector block (for human pose estimation task).

    Takes a batch of per-keypoint heatmaps and returns, for each keypoint, its peak
    coordinates and score as a (x, y, score) triple.

    Parameters:
    ----------
    tune : bool, default True
        Whether to tune point positions.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 tune=True,
                 data_format="channels_last",
                 **kwargs):
        super(HeatmapMaxDetBlock, self).__init__(**kwargs)
        self.tune = tune
        self.data_format = data_format

    def call(self, x, training=None):
        # if not tf.executing_eagerly():
        #     channels = x.shape[1] if is_channels_first(self.data_format) else x.shape[3]
        #     return x[:, :channels, :3, 0]
        vector_dim = 2
        x_shape = x.get_shape().as_list()
        batch = x_shape[0]
        # Flatten each heatmap to a vector of shape (batch, channels, H*W) regardless of layout.
        if is_channels_first(self.data_format):
            channels = x_shape[1]
            in_size = x_shape[2:]
            heatmap_vector = tf.reshape(x, shape=(batch, channels, -1))
        else:
            channels = x_shape[3]
            in_size = x_shape[1:3]
            heatmap_vector = tf.reshape(x, shape=(batch, -1, channels))
            heatmap_vector = tf.transpose(heatmap_vector, perm=(0, 2, 1))
        # Flat index of each per-keypoint peak, cast to float for the coordinate arithmetic below.
        indices = tf.cast(tf.expand_dims(tf.cast(tf.math.argmax(heatmap_vector, axis=vector_dim), np.int32),
                                         axis=vector_dim), np.float32)
        scores = tf.math.reduce_max(heatmap_vector, axis=vector_dim, keepdims=True)
        # Zero out coordinates whose peak score is not positive.
        scores_mask = tf.cast(tf.math.greater(scores, 0.0), np.float32)
        # Convert flat index to (x, y) using the heatmap width.
        pts_x = (indices % in_size[1]) * scores_mask
        pts_y = (indices // in_size[1]) * scores_mask
        pts = tf.concat([pts_x, pts_y, scores], axis=vector_dim)
        if self.tune:
            # Quarter-pixel refinement toward the larger neighbouring heatmap value.
            # NOTE(review): uses .numpy(), so this branch works only in eager execution.
            pts = pts.numpy()
            for b in range(batch):
                for k in range(channels):
                    hm = x[b, k, :, :] if is_channels_first(self.data_format) else x[b, :, :, k]
                    px = int(pts[b, k, 0])
                    py = int(pts[b, k, 1])
                    if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
                        pts[b, k, 0] += np.sign(hm[py, px + 1] - hm[py, px - 1]) * 0.25
                        pts[b, k, 1] += np.sign(hm[py + 1, px] - hm[py - 1, px]) * 0.25
            pts = tf.convert_to_tensor(pts)
        return pts
| 116,234 | 32.858142 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/lwopenpose_cmupan.py | """
Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in TensorFlow.
Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
https://arxiv.org/abs/1811.12004.
"""
__all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco',
'LwopDecoderFinalBlock']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block, SimpleSequential, is_channels_first,\
get_channel_axis
class LwopResBottleneck(nn.Layer):
    """
    Bottleneck block for residual path in the residual unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=True,
                 bottleneck_factor=2,
                 squeeze_out=False,
                 data_format="channels_last",
                 **kwargs):
        super(LwopResBottleneck, self).__init__(**kwargs)
        # The bottleneck width is derived from either the output or the input channel count.
        if squeeze_out:
            mid_channels = out_channels // bottleneck_factor
        else:
            mid_channels = in_channels // bottleneck_factor
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=use_bias,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            use_bias=use_bias,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return self.conv3(x, training=training)
class LwopResUnit(nn.Layer):
    """
    ResNet-like residual unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    activate : bool, default False
        Whether to activate the sum.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 use_bias=True,
                 bottleneck_factor=2,
                 squeeze_out=False,
                 activate=False,
                 data_format="channels_last",
                 **kwargs):
        super(LwopResUnit, self).__init__(**kwargs)
        self.activate = activate
        # The identity branch needs a 1x1 projection whenever shape (channels or stride) changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = LwopResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=use_bias,
            bottleneck_factor=bottleneck_factor,
            squeeze_out=squeeze_out,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        if self.activate:
            self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        x = self.body(x, training=training) + identity
        if self.activate:
            x = self.activ(x)
        return x
class LwopEncoderFinalBlock(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific encoder final block.

    A 1x1 entry conv, a residual stack of three depthwise-separable convs with ELU activations,
    and a 3x3 exit conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(LwopEncoderFinalBlock, self).__init__(**kwargs)
        self.pre_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="pre_conv")
        self.body = SimpleSequential(name="body")
        for idx in range(3):
            self.body.add(dwsconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                dw_use_bn=False,
                pw_use_bn=False,
                dw_activation=(lambda: nn.ELU()),
                pw_activation=(lambda: nn.ELU()),
                data_format=data_format,
                name="block{}".format(idx + 1)))
        self.post_conv = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="post_conv")

    def call(self, x, training=None):
        x = self.pre_conv(x, training=training)
        # Residual connection around the depthwise-separable stack.
        x = x + self.body(x, training=training)
        return self.post_conv(x, training=training)
class LwopRefinementBlock(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific refinement block for decoder units.

    A 1x1 entry conv followed by a residual pair of 3x3 convs (the second one dilated).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(LwopRefinementBlock, self).__init__(**kwargs)
        self.pre_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="pre_conv")
        self.body = SimpleSequential(name="body")
        self.body.add(conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="block1"))
        self.body.add(conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            padding=2,
            dilation=2,
            use_bias=True,
            data_format=data_format,
            name="block2"))

    def call(self, x, training=None):
        x = self.pre_conv(x, training=training)
        # Residual connection around the two-conv body.
        return x + self.body(x, training=training)
class LwopDecoderBend(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific decoder bend block.

    Two pointwise convolutions: a 1x1 block with bias and no BatchNorm, then a plain 1x1 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(LwopDecoderBend, self).__init__(**kwargs)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        y = self.conv1(x, training=training)
        return self.conv2(y, training=training)
class LwopDecoderInitBlock(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific decoder init block.

    Produces initial heatmap and PAF (part affinity field) estimates and concatenates them
    with the input features along the channel axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 data_format="channels_last",
                 **kwargs):
        super(LwopDecoderInitBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # One heatmap per keypoint; two PAF channels (x/y vector field) per keypoint.
        num_heatmap = keypoints
        num_paf = 2 * keypoints
        bend_mid_channels = 512
        self.body = SimpleSequential(name="body")
        for idx in range(3):
            self.body.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                use_bias=True,
                use_bn=False,
                data_format=data_format,
                name="block{}".format(idx + 1)))
        self.heatmap_bend = LwopDecoderBend(
            in_channels=in_channels,
            mid_channels=bend_mid_channels,
            out_channels=num_heatmap,
            data_format=data_format,
            name="heatmap_bend")
        self.paf_bend = LwopDecoderBend(
            in_channels=in_channels,
            mid_channels=bend_mid_channels,
            out_channels=num_paf,
            data_format=data_format,
            name="paf_bend")

    def call(self, x, training=None):
        features = self.body(x, training=training)
        heatmap = self.heatmap_bend(features, training=training)
        paf = self.paf_bend(features, training=training)
        # Keep the raw input alongside the predictions for downstream refinement units.
        return tf.concat((x, heatmap, paf), axis=get_channel_axis(self.data_format))
class LwopDecoderUnit(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific decoder init.

    Refines feature maps and re-estimates heatmap/PAF channels, concatenating the original
    feature slice with the fresh predictions.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 data_format="channels_last",
                 **kwargs):
        super(LwopDecoderUnit, self).__init__(**kwargs)
        self.data_format = data_format
        num_heatmap = keypoints
        num_paf = 2 * keypoints
        # Channels in front of the heatmap/PAF tail carry the backbone features.
        self.features_channels = in_channels - num_heatmap - num_paf
        self.body = SimpleSequential(name="body")
        for idx in range(5):
            self.body.add(LwopRefinementBlock(
                in_channels=in_channels,
                out_channels=self.features_channels,
                data_format=data_format,
                name="block{}".format(idx + 1)))
            in_channels = self.features_channels
        self.heatmap_bend = LwopDecoderBend(
            in_channels=self.features_channels,
            mid_channels=self.features_channels,
            out_channels=num_heatmap,
            data_format=data_format,
            name="heatmap_bend")
        self.paf_bend = LwopDecoderBend(
            in_channels=self.features_channels,
            mid_channels=self.features_channels,
            out_channels=num_paf,
            data_format=data_format,
            name="paf_bend")

    def call(self, x, training=None):
        if is_channels_first(self.data_format):
            features = x[:, :self.features_channels]
        else:
            features = x[:, :, :, :self.features_channels]
        refined = self.body(x, training=training)
        heatmap = self.heatmap_bend(refined, training=training)
        paf = self.paf_bend(refined, training=training)
        return tf.concat((features, heatmap, paf), axis=get_channel_axis(self.data_format))
class LwopDecoderFeaturesBend(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific decoder 3D features bend.

    Two refinement blocks followed by a decoder bend.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(LwopDecoderFeaturesBend, self).__init__(**kwargs)
        self.body = SimpleSequential(name="body")
        for idx in range(2):
            self.body.add(LwopRefinementBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                data_format=data_format,
                name="block{}".format(idx + 1)))
            in_channels = mid_channels
        self.features_bend = LwopDecoderBend(
            in_channels=mid_channels,
            mid_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="features_bend")

    def call(self, x, training=None):
        y = self.body(x, training=training)
        return self.features_bend(y, training=training)
class LwopDecoderFinalBlock(nn.Layer):
    """
    Lightweight OpenPose 2D/3D specific decoder final block for calculation of 3D poses.

    Slices the trailing 2D heatmap/PAF channels from the input; when 3D features are enabled,
    additionally refines the features and appends 3D predictions.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    bottleneck_factor : int
        Bottleneck factor.
    calc_3d_features : bool
        Whether to calculate 3D features.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 bottleneck_factor,
                 calc_3d_features,
                 data_format="channels_last",
                 **kwargs):
        super(LwopDecoderFinalBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # 1 heatmap + 2 PAF channels per keypoint.
        self.num_heatmap_paf = 3 * keypoints
        self.calc_3d_features = calc_3d_features
        features_out_channels = self.num_heatmap_paf
        features_in_channels = in_channels - features_out_channels
        if self.calc_3d_features:
            self.body = SimpleSequential(name="body")
            for idx in range(5):
                self.body.add(LwopResUnit(
                    in_channels=in_channels,
                    out_channels=features_in_channels,
                    bottleneck_factor=bottleneck_factor,
                    data_format=data_format,
                    name="block{}".format(idx + 1)))
                in_channels = features_in_channels
            self.features_bend = LwopDecoderFeaturesBend(
                in_channels=features_in_channels,
                mid_channels=features_in_channels,
                out_channels=features_out_channels,
                data_format=data_format,
                name="features_bend")

    def call(self, x, training=None):
        # The 2D heatmap/PAF predictions occupy the trailing channels of the input.
        if is_channels_first(self.data_format):
            heatmap_paf_2d = x[:, -self.num_heatmap_paf:]
        else:
            heatmap_paf_2d = x[:, :, :, -self.num_heatmap_paf:]
        if not self.calc_3d_features:
            return heatmap_paf_2d
        y = self.body(x, training=training)
        y = self.features_bend(y, training=training)
        return tf.concat((heatmap_paf_2d, y), axis=get_channel_axis(self.data_format))
class LwOpenPose(tf.keras.Model):
    """
    Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
    https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    encoder_channels : list of list of int
        Number of output channels for each encoder unit.
    encoder_paddings : list of list of int
        Padding/dilation value for each encoder unit.
    encoder_init_block_channels : int
        Number of output channels for the encoder initial unit.
    encoder_final_block_channels : int
        Number of output channels for the encoder final unit.
    refinement_units : int
        Number of refinement blocks in the decoder.
    calc_3d_features : bool
        Whether to calculate 3D features.
    return_heatmap : bool, default True
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (368, 368)
        Spatial size of the expected input image.
    keypoints : int, default 19
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 encoder_channels,
                 encoder_paddings,
                 encoder_init_block_channels,
                 encoder_final_block_channels,
                 refinement_units,
                 calc_3d_features,
                 return_heatmap=True,
                 in_channels=3,
                 in_size=(368, 368),
                 keypoints=19,
                 data_format="channels_last",
                 **kwargs):
        super(LwOpenPose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.data_format = data_format
        self.return_heatmap = return_heatmap
        self.calc_3d_features = calc_3d_features
        # One heatmap channel plus two PAF (part affinity field) channels per keypoint.
        num_heatmap_paf = 3 * keypoints

        self.encoder = SimpleSequential(name="encoder")
        backbone = SimpleSequential(name="backbone")
        backbone.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=encoder_init_block_channels,
            strides=2,
            data_format=data_format,
            name="init_block"))
        in_channels = encoder_init_block_channels
        for i, channels_per_stage in enumerate(encoder_channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the start of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                padding = encoder_paddings[i][j]
                stage.add(dwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=padding,
                    dilation=padding,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            backbone.add(stage)
        self.encoder.add(backbone)
        self.encoder.add(LwopEncoderFinalBlock(
            in_channels=in_channels,
            out_channels=encoder_final_block_channels,
            data_format=data_format,
            name="final_block"))
        in_channels = encoder_final_block_channels

        self.decoder = SimpleSequential(name="decoder")
        self.decoder.add(LwopDecoderInitBlock(
            in_channels=in_channels,
            keypoints=keypoints,
            data_format=data_format,
            name="init_block"))
        # Refinement units consume backbone features concatenated with heatmaps/PAFs.
        in_channels = encoder_final_block_channels + num_heatmap_paf
        for i in range(refinement_units):
            self.decoder.add(LwopDecoderUnit(
                in_channels=in_channels,
                keypoints=keypoints,
                data_format=data_format,
                name="unit{}".format(i + 1)))
        self.decoder.add(LwopDecoderFinalBlock(
            in_channels=in_channels,
            keypoints=keypoints,
            bottleneck_factor=2,
            calc_3d_features=calc_3d_features,
            data_format=data_format,
            name="final_block"))

    def call(self, x, training=None):
        x = self.encoder(x, training=training)
        x = self.decoder(x, training=training)
        # NOTE: the original `if self.return_heatmap` returned the same tensor in both
        # branches, so the flag had no effect; it is kept only for interface
        # compatibility and the dead conditional has been removed.
        return x
def get_lwopenpose(calc_3d_features,
                   keypoints,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create Lightweight OpenPose 2D/3D model with specific parameters.

    Parameters:
    ----------
    calc_3d_features : bool, default False
        Whether to calculate 3D features.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = LwOpenPose(
        encoder_channels=[[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]],
        encoder_paddings=[[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]],
        encoder_init_block_channels=32,
        encoder_final_block_channels=128,
        refinement_units=1,
        calc_3d_features=calc_3d_features,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, data_format="channels_last", **kwargs):
    """
    Lightweight OpenPose 2D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_lwopenpose(
        calc_3d_features=False,
        keypoints=keypoints,
        model_name="lwopenpose2d_mobilenet_cmupan_coco",
        data_format=data_format,
        **kwargs)
def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, data_format="channels_last", **kwargs):
    """
    Lightweight OpenPose 3D model on the base of MobileNet for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_lwopenpose(
        calc_3d_features=True,
        keypoints=keypoints,
        model_name="lwopenpose3d_mobilenet_cmupan_coco",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test both LwOpenPose variants: build each, run a random batch through it
    and check output shapes and trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    # os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
    # os.environ["TF_DETERMINISTIC_OPS"] = "1"
    data_format = "channels_last"
    # data_format = "channels_first"
    in_size_ = (368, 368)
    keypoints = 19
    return_heatmap = True
    pretrained = False
    # (constructor, "2d"/"3d" tag, input size) triples.
    models = [
        (lwopenpose2d_mobilenet_cmupan_coco, "2d", in_size_),
        (lwopenpose3d_mobilenet_cmupan_coco, "3d", in_size_),
    ]
    for model, model_dim, in_size in models:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else
                             (batch, in_size[0], in_size[1], 3))
        y = net(x)
        assert (y.shape[0] == batch)
        # 2D output: heatmaps + PAFs; 3D output additionally doubles the channels.
        keypoints_ = 3 * keypoints if model_dim == "2d" else 6 * keypoints
        # The network output is downscaled 8x relative to the input.
        if is_channels_first(data_format):
            assert ((y.shape[1] == keypoints_) and (y.shape[2] == x.shape[2] // 8) and
                    (y.shape[3] == x.shape[3] // 8))
        else:
            assert ((y.shape[3] == keypoints_) and (y.shape[1] == x.shape[1] // 8) and
                    (y.shape[2] == x.shape[2] // 8))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for regression checking.
        assert (model != lwopenpose2d_mobilenet_cmupan_coco or weight_count == 4091698)
        assert (model != lwopenpose3d_mobilenet_cmupan_coco or weight_count == 5085983)
if __name__ == "__main__":
    _test()
| 26,896 | 34.344284 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/jasperdr.py | """
Jasper DR (Dense Residual) for ASR, implemented in TensorFlow.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['jasperdr10x5_en', 'jasperdr10x5_en_nr']
from .jasper import get_jasper
from .common import is_channels_first
def jasperdr10x5_en(classes=29, **kwargs):
    """
    Jasper DR 10x5 model for English language from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        classes=classes,
        version=("jasper", "10x5"),
        use_dr=True,
        model_name="jasperdr10x5_en",
        **kwargs)
def jasperdr10x5_en_nr(classes=29, **kwargs):
    """
    Jasper DR 10x5 model for English language (with presence of noise) from 'Jasper: An End-to-End Convolutional Neural
    Acoustic Model,' https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        classes=classes,
        version=("jasper", "10x5"),
        use_dr=True,
        model_name="jasperdr10x5_en_nr",
        **kwargs)
def _test():
    """Smoke-test the Jasper DR models: build each net, run a random batch through it
    and check output shapes and trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    import tensorflow as tf

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    audio_features = 64
    classes = 29

    models = [
        jasperdr10x5_en,
        jasperdr10x5_en_nr,
    ]

    for model in models:
        net = model(
            in_channels=audio_features,
            pretrained=pretrained,
            data_format=data_format)

        batch = 3
        # Random per-sample sequence lengths; pad the batch slightly beyond the max.
        seq_len = np.random.randint(60, 150, batch)
        seq_len_max = seq_len.max() + 2
        x = tf.random.normal((batch, audio_features, seq_len_max) if is_channels_first(data_format) else
                             (batch, seq_len_max, audio_features))
        # Fix: `np.long` was deprecated in NumPy 1.20 and removed in 1.24; use the
        # explicit 64-bit integer dtype instead.
        x_len = tf.convert_to_tensor(seq_len.astype(np.int64))

        y, y_len = net(x, x_len)
        assert (y.shape.as_list()[0] == batch)
        # Time axis is downscaled 2x (with possible off-by-one from padding).
        if is_channels_first(data_format):
            assert (y.shape.as_list()[1] == classes)
            assert (y.shape.as_list()[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
        else:
            assert (y.shape.as_list()[1] in [seq_len_max // 2, seq_len_max // 2 + 1])
            assert (y.shape.as_list()[2] == classes)

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for regression checking.
        assert (model != jasperdr10x5_en or weight_count == 332632349)
        assert (model != jasperdr10x5_en_nr or weight_count == 332632349)


if __name__ == "__main__":
    _test()
| 3,268 | 33.410526 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/deeplabv3.py | """
DeepLabv3 for image segmentation, implemented in TensorFlow.
Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.
"""
__all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc',
'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco',
'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes',
'deeplabv3_resnetd101b_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, is_channels_first, interpolate_im,\
get_im_size
from .resnetd import resnetd50b, resnetd101b, resnetd152b
class DeepLabv3FinalBlock(nn.Layer):
    """
    DeepLabv3 final block: a 3x3 bottleneck conv, dropout, a 1x1 scoring conv and a
    bilinear upsampling to the requested output size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert in_channels % bottleneck_factor == 0
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, out_size, training=None):
        y = self.conv1(x, training=training)
        y = self.dropout(y, training=training)
        y = self.conv2(y)
        return interpolate_im(y, out_size=out_size, data_format=self.data_format)
class ASPPAvgBranch(nn.Layer):
    """
    ASPP branch with average pooling: pool, 1x1 conv, then bilinear upsampling back to
    the target size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    upscale_out_size : tuple of 2 int or None
        Spatial size of output image for the bilinear upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(ASPPAvgBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        self.data_format = data_format
        # NOTE(review): pool_size=1 (strides default to pool_size) makes this pooling
        # effectively an identity op; the canonical ASPP image-level branch uses
        # *global* average pooling — confirm whether this is intentional before
        # changing it, since any published weights were produced with this behavior.
        self.pool = nn.AveragePooling2D(
            pool_size=1,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        # Use the fixed output size when known at construction time, otherwise take it
        # from the input tensor dynamically.
        in_size = self.upscale_out_size if self.upscale_out_size is not None else\
            get_im_size(x, data_format=self.data_format)
        x = self.pool(x)
        x = self.conv(x, training=training)
        x = interpolate_im(x, out_size=in_size, data_format=self.data_format)
        return x
class AtrousSpatialPyramidPooling(nn.Layer):
    """
    Atrous Spatial Pyramid Pooling (ASPP) module: a 1x1 branch, three dilated 3x3
    branches and an average-pooling branch, concatenated and projected by a 1x1 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(AtrousSpatialPyramidPooling, self).__init__(**kwargs)
        rates = [12, 24, 36]
        assert in_channels % 8 == 0
        mid_channels = in_channels // 8
        concat_channels = 5 * mid_channels

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="branch1"))
        for i, rate in enumerate(rates):
            self.branches.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                padding=rate,
                dilation=rate,
                data_format=data_format,
                name="branch{}".format(i + 2)))
        self.branches.add(ASPPAvgBranch(
            in_channels=in_channels,
            out_channels=mid_channels,
            upscale_out_size=upscale_out_size,
            data_format=data_format,
            name="branch5"))
        self.conv = conv1x1_block(
            in_channels=concat_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv")
        self.dropout = nn.Dropout(
            rate=0.5,
            name="dropout")

    def call(self, x, training=None):
        y = self.branches(x, training=training)
        y = self.conv(y, training=training)
        return self.dropout(y, training=training)
class DeepLabv3(tf.keras.Model):
    """
    DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from the feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 data_format="channels_last",
                 **kwargs):
        super(DeepLabv3, self).__init__(**kwargs)
        assert in_channels > 0
        assert (in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format

        self.backbone = backbone
        # The backbone reduces the spatial size 8x, hence the pooling output size.
        pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
        self.pool = AtrousSpatialPyramidPooling(
            in_channels=backbone_out_channels,
            upscale_out_size=pool_out_size,
            data_format=data_format,
            name="pool")
        pool_out_channels = backbone_out_channels // 8
        self.final_block = DeepLabv3FinalBlock(
            in_channels=pool_out_channels,
            out_channels=classes,
            bottleneck_factor=1,
            data_format=data_format,
            name="final_block")
        if self.aux:
            aux_out_channels = backbone_out_channels // 2
            self.aux_block = DeepLabv3FinalBlock(
                in_channels=aux_out_channels,
                out_channels=classes,
                bottleneck_factor=4,
                data_format=data_format,
                name="aux_block")

    def call(self, x, training=None):
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        # The backbone yields the main feature map plus an intermediate (aux) one.
        x, y = self.backbone(x, training=training)
        x = self.pool(x, training=training)
        x = self.final_block(x, in_size, training=training)
        if not self.aux:
            return x
        y = self.aux_block(y, in_size, training=training)
        return x, y
def get_deeplabv3(backbone,
                  classes,
                  aux=False,
                  model_name=None,
                  data_format="channels_last",
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create DeepLabv3 model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = DeepLabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        data_format=data_format,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)

    return net
def deeplabv3_resnetd50b_voc(pretrained_backbone=False,
                             classes=21,
                             aux=True,
                             data_format="channels_last",
                             **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd50b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd50b_voc",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd101b_voc(pretrained_backbone=False,
                              classes=21,
                              aux=True,
                              data_format="channels_last",
                              **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd101b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd101b_voc",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd152b_voc(pretrained_backbone=False,
                              classes=21,
                              aux=True,
                              data_format="channels_last",
                              **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-152b for Pascal VOC from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd152b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd152b_voc",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd50b_coco(pretrained_backbone=False,
                              classes=21,
                              aux=True,
                              data_format="channels_last",
                              **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd50b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd50b_coco",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd101b_coco(pretrained_backbone=False,
                               classes=21,
                               aux=True,
                               data_format="channels_last",
                               **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd101b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd101b_coco",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd152b_coco(pretrained_backbone=False,
                               classes=21,
                               aux=True,
                               data_format="channels_last",
                               **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-152b for COCO from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd152b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd152b_coco",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False,
                                classes=150,
                                aux=True,
                                data_format="channels_last",
                                **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd50b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd50b_ade20k",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False,
                                 classes=150,
                                 aux=True,
                                 data_format="channels_last",
                                 **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd101b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd101b_ade20k",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False,
                                    classes=19,
                                    aux=True,
                                    data_format="channels_last",
                                    **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-50b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd50b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd50b_cityscapes",
        data_format=data_format,
        **kwargs)
def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False,
                                     classes=19,
                                     aux=True,
                                     data_format="channels_last",
                                     **kwargs):
    """
    DeepLabv3 model on the base of ResNet(D)-101b for Cityscapes from 'Rethinking Atrous Convolution for Semantic Image
    Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnetd101b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the trailing global-pooling stage of the classifier backbone.
    backbone.children.pop()
    return get_deeplabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="deeplabv3_resnetd101b_cityscapes",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test every DeepLabv3 variant: build it, run a random batch through it and
    check the output shape and the trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (480, 480)
    aux = False
    pretrained = False
    # (constructor, number of segmentation classes) pairs.
    models = [
        (deeplabv3_resnetd50b_voc, 21),
        (deeplabv3_resnetd101b_voc, 21),
        (deeplabv3_resnetd152b_voc, 21),
        (deeplabv3_resnetd50b_coco, 21),
        (deeplabv3_resnetd101b_coco, 21),
        (deeplabv3_resnetd152b_coco, 21),
        (deeplabv3_resnetd50b_ade20k, 150),
        (deeplabv3_resnetd101b_ade20k, 150),
        (deeplabv3_resnetd50b_cityscapes, 19),
        (deeplabv3_resnetd101b_cityscapes, 19),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else
                             (batch, in_size[0], in_size[1], 3))
        # With aux=True the net returns (main, aux); otherwise just the main output.
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for regression checking (aux adds the aux head).
        if aux:
            assert (model != deeplabv3_resnetd50b_voc or weight_count == 42127850)
            assert (model != deeplabv3_resnetd101b_voc or weight_count == 61119978)
            assert (model != deeplabv3_resnetd152b_voc or weight_count == 76763626)
            assert (model != deeplabv3_resnetd50b_coco or weight_count == 42127850)
            assert (model != deeplabv3_resnetd101b_coco or weight_count == 61119978)
            assert (model != deeplabv3_resnetd152b_coco or weight_count == 76763626)
            assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 42194156)
            assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 61186284)
            assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 42126822)
            assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 61118950)
        else:
            assert (model != deeplabv3_resnetd50b_voc or weight_count == 39762645)
            assert (model != deeplabv3_resnetd101b_voc or weight_count == 58754773)
            assert (model != deeplabv3_resnetd152b_voc or weight_count == 74398421)
            assert (model != deeplabv3_resnetd50b_coco or weight_count == 39762645)
            assert (model != deeplabv3_resnetd101b_coco or weight_count == 58754773)
            assert (model != deeplabv3_resnetd152b_coco or weight_count == 74398421)
            assert (model != deeplabv3_resnetd50b_ade20k or weight_count == 39795798)
            assert (model != deeplabv3_resnetd101b_ade20k or weight_count == 58787926)
            assert (model != deeplabv3_resnetd50b_cityscapes or weight_count == 39762131)
            assert (model != deeplabv3_resnetd101b_cityscapes or weight_count == 58754259)
if __name__ == "__main__":
    _test()
| 26,559 | 40.178295 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fpenet.py | """
FPENet for image segmentation, implemented in TensorFlow.
Original paper: 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1909.08599.
"""
__all__ = ['FPENet', 'fpenet_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, SEBlock, InterpolationBlock, MultiOutputSequential,\
SimpleSequential, is_channels_first, get_channel_axis
class FPEBlock(nn.Layer):
    """
    FPENet block: splits the channels into four equal groups, runs each group
    through a dilated depthwise 3x3 convolution (dilations 1/2/4/8), and
    cascades the groups so each branch also sees the previous branch's output.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(FPEBlock, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        dilation_rates = [1, 2, 4, 8]
        num_branches = len(dilation_rates)
        assert (channels % num_branches == 0)
        branch_channels = channels // num_branches
        self.blocks = SimpleSequential(name="blocks")
        for idx, rate in enumerate(dilation_rates):
            self.blocks.add(conv3x3_block(
                in_channels=branch_channels,
                out_channels=branch_channels,
                groups=branch_channels,
                dilation=rate,
                padding=rate,
                data_format=data_format,
                name="block{}".format(idx + 1)))

    def call(self, x, training=None):
        branches = self.blocks.children
        chunks = tf.split(x, num_or_size_splits=len(branches), axis=self.axis)
        outs = []
        prev = None
        for branch, chunk in zip(branches, chunks):
            # Hierarchical fusion: every branch past the first also receives
            # the previous branch's output.
            inp = chunk if prev is None else (chunk + prev)
            prev = branch(inp, training=training)
            outs.append(prev)
        return tf.concat(outs, axis=self.axis)
class FPEUnit(nn.Layer):
    """
    FPENet unit: 1x1 expansion conv -> FPE block -> 1x1 projection, wrapped in
    a residual connection (projected when the shape changes) with an optional
    SE module before the addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int
        Bottleneck factor.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(FPEUnit, self).__init__(**kwargs)
        self.use_se = use_se
        self.resize_identity = (strides != 1) or (in_channels != out_channels)
        exp_channels = in_channels * bottleneck_factor

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=exp_channels,
            strides=strides,
            data_format=data_format,
            name="conv1")
        self.block = FPEBlock(
            channels=exp_channels,
            data_format=data_format,
            name="blocks")
        self.conv2 = conv1x1_block(
            in_channels=exp_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv2")
        if use_se:
            self.se = SEBlock(
                channels=out_channels,
                data_format=data_format,
                name="se")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        y = self.conv1(x, training=training)
        y = self.block(y, training=training)
        y = self.conv2(y, training=training)
        if self.use_se:
            y = self.se(y, training=training)
        return self.activ(y + identity)
class FPEStage(nn.Layer):
    """
    FPENet stage: one (possibly strided/expanding) FPE unit, followed — when
    `layers` > 1 — by a residual stack of additional units whose summed output
    is added back to the downsampled features.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    layers : int
        Number of layers.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 layers,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(FPEStage, self).__init__(**kwargs)
        self.use_block = (layers > 1)

        if not self.use_block:
            # Single-unit stage: no downsampling, no expansion.
            self.down = FPEUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=1,
                bottleneck_factor=1,
                use_se=use_se,
                data_format=data_format,
                name="down")
        else:
            # Strided, expanding unit plus a stack of identity-shaped units.
            self.down = FPEUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bottleneck_factor=4,
                use_se=use_se,
                data_format=data_format,
                name="down")
            self.blocks = SimpleSequential(name="blocks")
            for k in range(layers - 1):
                self.blocks.add(FPEUnit(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    strides=1,
                    bottleneck_factor=1,
                    use_se=use_se,
                    data_format=data_format,
                    name="block{}".format(k + 1)))

    def call(self, x, training=None):
        x = self.down(x, training=training)
        if self.use_block:
            x = x + self.blocks(x, training=training)
        return x
class MEUBlock(nn.Layer):
    """
    FPENet specific mutual embedding upsample (MEU) block.

    Fuses a lower-resolution/high-level feature map (`x_high`) with a
    higher-resolution/low-level one (`x_low`): both are projected to
    `out_channels`, then a channel-attention vector derived from `x_high`
    reweights `x_low`, a spatial-attention map derived from `x_low` reweights
    the (2x upsampled) `x_high`, and the two reweighted maps are summed.

    Parameters:
    ----------
    in_channels_high : int
        Number of input channels for x_high.
    in_channels_low : int
        Number of input channels for x_low.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_high,
                 in_channels_low,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(MEUBlock, self).__init__(**kwargs)
        self.data_format = data_format
        self.axis = get_channel_axis(data_format)
        # 1x1 projections (no activation) bringing both inputs to a common
        # channel count before the mutual attention.
        self.conv_high = conv1x1_block(
            in_channels=in_channels_high,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv_high")
        self.conv_low = conv1x1_block(
            in_channels=in_channels_low,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv_low")
        # Global pooling feeds the channel-attention branch (from x_high).
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv_w_high = conv1x1(
            in_channels=out_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv_w_high")
        # Single-channel conv for the spatial-attention branch (from the
        # channel-mean of x_low).
        self.conv_w_low = conv1x1(
            in_channels=1,
            out_channels=1,
            data_format=data_format,
            name="conv_w_low")
        self.relu = nn.ReLU()
        # x_high has half the spatial resolution of x_low, so upsample by 2.
        self.up = InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up")

    def call(self, x_high, x_low, training=None):
        x_high = self.conv_high(x_high, training=training)
        x_low = self.conv_low(x_low, training=training)
        # Channel attention: GAP yields (batch, channels); two expand_dims
        # restore singleton spatial axes so the 1x1 conv can be applied.
        w_high = self.pool(x_high)
        # Spatial axes go at the end for channels_first (B, C) -> (B, C, 1, 1),
        # and right after batch for channels_last (B, C) -> (B, 1, 1, C).
        axis = -1 if is_channels_first(self.data_format) else 1
        w_high = tf.expand_dims(tf.expand_dims(w_high, axis=axis), axis=axis)
        w_high = self.conv_w_high(w_high)
        w_high = self.relu(w_high)
        w_high = tf.nn.sigmoid(w_high)
        # Spatial attention: mean over the channel axis of x_low.
        w_low = tf.math.reduce_mean(x_low, axis=self.axis, keepdims=True)
        w_low = self.conv_w_low(w_low)
        w_low = tf.nn.sigmoid(w_low)
        # Mutual reweighting: upsampled x_high gets the spatial map,
        # x_low gets the channel vector; results are summed.
        x_high = self.up(x_high)
        x_high = x_high * w_low
        x_low = x_low * w_high
        out = x_high + x_low
        return out
class FPENet(tf.keras.Model):
    """
    FPENet model from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1909.08599.

    Architecture: a strided 3x3 stem, three FPE stages whose outputs are all
    collected, two MEU decoder blocks fusing them coarse-to-fine, a 1x1
    classifier, and a final 2x upsample.

    Parameters:
    ----------
    layers : list of int
        Number of layers for each unit.
    channels : list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    meu_channels : list of int
        Number of output channels for MEU blocks.
    use_se : bool
        Whether to use SE-module.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 layers,
                 channels,
                 init_block_channels,
                 meu_channels,
                 use_se,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(FPENet, self).__init__(**kwargs)
        # NOTE(review): `aux` and `fixed_size` are only validated here, never
        # used below — kept for interface parity with sibling models.
        assert (aux is not None)
        assert (fixed_size is not None)
        # The network downsamples by 8 overall, so both input dims must divide 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        self.data_format = data_format
        # Stem halves the spatial resolution.
        self.stem = conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            data_format=data_format,
            name="stem")
        in_channels = init_block_channels
        # return_last=False: the encoder returns the list of per-stage outputs
        # (used as skip connections) instead of only the final one.
        self.encoder = MultiOutputSequential(
            return_last=False,
            name="encoder")
        for i, (layers_i, out_channels) in enumerate(zip(layers, channels)):
            stage = FPEStage(
                in_channels=in_channels,
                out_channels=out_channels,
                layers=layers_i,
                use_se=use_se,
                data_format=data_format,
                name="stage{}".format(i + 1))
            # Mark every stage so MultiOutputSequential collects its output.
            stage.do_output = True
            self.encoder.add(stage)
            in_channels = out_channels
        # Decoder: meu1 fuses stage3 (deepest) with stage2, meu2 fuses the
        # result with stage1.
        self.meu1 = MEUBlock(
            in_channels_high=channels[-1],
            in_channels_low=channels[-2],
            out_channels=meu_channels[0],
            data_format=data_format,
            name="meu1")
        self.meu2 = MEUBlock(
            in_channels_high=meu_channels[0],
            in_channels_low=channels[-3],
            out_channels=meu_channels[1],
            data_format=data_format,
            name="meu2")
        in_channels = meu_channels[1]
        self.classifier = conv1x1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="classifier")
        # Final 2x upsample restores the stem's downsampling, giving
        # full-resolution logits.
        self.up = InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up")

    def call(self, x, training=None):
        x = self.stem(x, training=training)
        # y is the list [stage1_out, stage2_out, stage3_out].
        y = self.encoder(x, training=training)
        x = self.meu1(y[2], y[1], training=training)
        x = self.meu2(x, y[0], training=training)
        x = self.classifier(x)
        x = self.up(x)
        return x
def get_fpenet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create FPENet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Encoder widths double each stage: 16, 32, 64.
    base_width = 16
    net = FPENet(
        layers=[1, 3, 9],
        channels=[base_width * (2 ** i) for i in range(3)],
        init_block_channels=base_width,
        meu_channels=[64, 32],
        use_se=False,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)

    return net
def fpenet_cityscapes(classes=19, **kwargs):
    """
    FPENet model for Cityscapes from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1909.08599.

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fpenet(model_name="fpenet_cityscapes", classes=classes, **kwargs)
def _test():
    # Smoke test: build the model, run one forward pass, check output shape
    # and the trainable-parameter count.
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    in_size = (1024, 2048)
    classes = 19

    for model in [fpenet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, data_format=data_format)

        batch = 4
        if is_channels_first(data_format):
            x_shape = (batch, 3, in_size[0], in_size[1])
            y_shape = (batch, classes, in_size[0], in_size[1])
        else:
            x_shape = (batch, in_size[0], in_size[1], 3)
            y_shape = (batch, in_size[0], in_size[1], classes)
        x = tf.random.normal(x_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == y_shape)

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != fpenet_cityscapes or weight_count == 115125)


if __name__ == "__main__":
    _test()
| 15,897 | 31.378819 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fastseresnet.py | """
Fast-SE-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['FastSEResNet', 'fastseresnet101b']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, SEBlock, SimpleSequential, flatten
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class FastSEResUnit(nn.Layer):
    """
    Fast-SE-ResNet unit: a standard ResNet body (simple or bottleneck) with an
    optional SE module applied before the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 conv1_stride,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(FastSEResUnit, self).__init__(**kwargs)
        self.use_se = use_se
        # Identity needs a projection whenever shape or stride changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        if self.use_se:
            self.se = SEBlock(
                channels=out_channels,
                reduction=1,
                use_conv=False,
                data_format=data_format,
                name="se")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        if self.use_se:
            # Fix: propagate the `training` flag (it was dropped before),
            # matching how every other composite sub-block here is invoked.
            x = self.se(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class FastSEResNet(tf.keras.Model):
    """
    Fast-SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    The "fast" variant attaches the SE module only to the first unit of each
    stage (see `use_se` below), instead of every unit.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(FastSEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # SE only on the first unit of each stage — the "fast" variant.
                use_se = (j == 0)
                stage.add(FastSEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    use_se=use_se,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 average pool assumes a 224x224 input (7x7 final feature map).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Flatten the pooled 1x1 feature map before the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_fastseresnet(blocks,
                     bottleneck=None,
                     conv1_stride=True,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Create Fast-SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths whose per-stage layout does not depend on the block type.
    common_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in common_layers:
        layers = common_layers[blocks]
    else:
        raise ValueError("Unsupported Fast-SE-ResNet with number of blocks: {}".format(blocks))
    # Sanity-check depth: 3 convs per bottleneck unit, 2 per simple unit,
    # plus the init block and the classifier.
    assert (sum(layers) * (3 if bottleneck else 2) + 2 == blocks)

    init_block_channels = 64
    stage_widths = [64, 128, 256, 512]
    if bottleneck:
        stage_widths = [4 * w for w in stage_widths]
    channels = [[w] * d for (w, d) in zip(stage_widths, layers)]

    net = FastSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def fastseresnet101b(**kwargs):
    """
    Fast-SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fastseresnet(model_name="fastseresnet101b", blocks=101, conv1_stride=False, **kwargs)
def _test():
    # Smoke test: one forward pass, output-shape check, parameter count.
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    for model in [fastseresnet101b]:
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # assert (model != fastseresnet101b or weight_count == 55697960)


if __name__ == "__main__":
    _test()
| 10,194 | 31.887097 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ibnbresnet.py | """
IBN(b)-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, InstanceNorm, Conv2d, conv1x1_block, SimpleSequential, flatten, is_channels_first
from .resnet import ResBottleneck
class IBNbConvBlock(nn.Layer):
    """
    IBN(b)-ResNet convolution block: convolution, instance normalization, and
    an optional ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activate : bool, default True
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 activate=True,
                 data_format="channels_last",
                 **kwargs):
        super(IBNbConvBlock, self).__init__(**kwargs)
        self.activate = activate
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="conv")
        self.inst_norm = InstanceNorm(
            scale=True,
            data_format=data_format,
            name="inst_norm")
        if activate:
            self.activ = nn.ReLU()

    def call(self, x, training=None):
        y = self.conv(x, training=training)
        y = self.inst_norm(y, training=training)
        if self.activate:
            y = self.activ(y)
        return y
def ibnb_conv7x7_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=3,
                       use_bias=False,
                       activate=True,
                       data_format="channels_last",
                       **kwargs):
    """
    7x7 version of the IBN(b)-ResNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activate : bool, default True
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return IBNbConvBlock(
        kernel_size=7,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activate=activate,
        data_format=data_format,
        **kwargs)
class IBNbResUnit(nn.Layer):
    """
    IBN(b)-ResNet unit: a ResNet bottleneck with residual connection, where an
    instance-normalization layer can be applied after the addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_inst_norm : bool
        Whether to use instance normalization.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_inst_norm,
                 data_format="channels_last",
                 **kwargs):
        super(IBNbResUnit, self).__init__(**kwargs)
        self.use_inst_norm = use_inst_norm
        self.resize_identity = (strides != 1) or (in_channels != out_channels)

        self.body = ResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=False,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        if use_inst_norm:
            self.inst_norm = InstanceNorm(
                scale=True,
                data_format=data_format,
                name="inst_norm")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        y = self.body(x, training=training)
        y = y + identity
        # IBN(b): instance norm goes after the residual addition.
        if self.use_inst_norm:
            y = self.inst_norm(y, training=training)
        return self.activ(y)
class IBNbResInitBlock(nn.Layer):
    """
    IBN(b)-ResNet initial block: strided 7x7 IBN(b) convolution followed by
    3x3 max pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(IBNbResInitBlock, self).__init__(**kwargs)
        self.conv = ibnb_conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(self.conv(x, training=training))
class IBNbResNet(tf.keras.Model):
    """
    IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IBNbResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(IBNbResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # IBN(b): instance norm only after the LAST unit of the first
                # two stages.
                use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1)
                stage.add(IBNbResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_inst_norm=use_inst_norm,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 average pool assumes a 224x224 input (7x7 final feature map).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Flatten the pooled 1x1 feature map before the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_ibnbresnet(blocks,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create IBN(b)-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    depth_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported IBN(b)-ResNet with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]

    init_block_channels = 64
    stage_widths = [256, 512, 1024, 2048]
    channels = [[w] * d for (w, d) in zip(stage_widths, layers)]

    net = IBNbResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def ibnb_resnet50(**kwargs):
    """
    IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnbresnet(model_name="ibnb_resnet50", blocks=50, **kwargs)
def ibnb_resnet101(**kwargs):
    """
    IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnbresnet(model_name="ibnb_resnet101", blocks=101, **kwargs)
def ibnb_resnet152(**kwargs):
    """
    IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnbresnet(model_name="ibnb_resnet152", blocks=152, **kwargs)
def _test():
    # Smoke test: one forward pass per model, output-shape check, and exact
    # trainable-parameter counts.
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    expected_weights = {
        ibnb_resnet50: 25558568,
        ibnb_resnet101: 44550696,
        ibnb_resnet152: 60194344,
    }

    for model in [ibnb_resnet50, ibnb_resnet101, ibnb_resnet152]:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weights[model])


if __name__ == "__main__":
    _test()
| 13,824 | 31.377049 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/polynet.py | """
PolyNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
https://arxiv.org/abs/1611.05725.
"""
__all__ = ['PolyNet', 'polynet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, Conv2d, ConvBlock, BatchNorm, SimpleSequential, ParametricSequential, Concurrent,\
ParametricConcurrent, conv1x1_block, conv3x3_block, flatten, is_channels_first
class PolyConv(nn.Layer):
    """
    PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules.
    The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not
    shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception
    blocks inside a poly-N module.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
        super(PolyConv, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=False,
            data_format=data_format,
            name="conv")
        # One BatchNorm per hosting Inception block: the convolution weights
        # are shared, the normalization statistics are not.
        self.bns = []
        for i in range(num_blocks):
            self.bns.append(BatchNorm(
                data_format=data_format,
                name="bn{}".format(i + 1)))
        self.activ = nn.ReLU()

    def call(self, x, index, training=None):
        # `index` selects the BatchNorm of the calling Inception block.
        x = self.conv(x, training=training)
        # Fix: the `training` flag was previously dropped here, so BatchNorm
        # never switched between batch statistics (training) and moving
        # averages (inference).
        x = self.bns[index](x, training=training)
        x = self.activ(x)
        return x
def poly_conv1x1(in_channels,
                 out_channels,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
    """
    1x1 version of the PolyNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Pointwise configuration: 1x1 kernel, unit stride, no padding.
    block = PolyConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=1,
        padding=0,
        num_blocks=num_blocks,
        data_format=data_format,
        **kwargs)
    return block
class MaxPoolBranch(nn.Layer):
    """
    PolyNet specific max pooling branch block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(MaxPoolBranch, self).__init__(**kwargs)
        # 3x3 window, stride 2, no padding: halves the spatial resolution.
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(x)
class Conv1x1Branch(nn.Layer):
    """
    PolyNet specific convolutional 1x1 branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(Conv1x1Branch, self).__init__(**kwargs)
        # Single pointwise projection branch.
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        return self.conv(x, training=training)
class Conv3x3Branch(nn.Layer):
    """
    PolyNet specific convolutional 3x3 branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(Conv3x3Branch, self).__init__(**kwargs)
        # Stride-2, no-padding 3x3 convolution: downsampling branch.
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            padding=0,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        return self.conv(x, training=training)
class ConvSeqBranch(nn.Layer):
    """
    PolyNet specific convolutional sequence branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 data_format="channels_last",
                 **kwargs):
        super(ConvSeqBranch, self).__init__(**kwargs)
        # Every per-layer parameter list must describe the same number of layers.
        num_layers = len(out_channels_list)
        assert (len(kernel_size_list) == num_layers)
        assert (len(strides_list) == num_layers)
        assert (len(padding_list) == num_layers)

        self.conv_list = SimpleSequential(name="conv_list")
        layer_params = zip(out_channels_list, kernel_size_list, strides_list, padding_list)
        for idx, (out_channels, kernel_size, strides, padding) in enumerate(layer_params, start=1):
            self.conv_list.add(ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                data_format=data_format,
                name="conv{}".format(idx)))
            # Output width of this layer feeds the next one.
            in_channels = out_channels

    def call(self, x, training=None):
        return self.conv_list(x, training=training)
class PolyConvSeqBranch(nn.Layer):
    """
    PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    num_blocks : int
        Number of blocks for PolyConv.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
        super(PolyConvSeqBranch, self).__init__(**kwargs)
        # All per-layer parameter lists must have one entry per convolution.
        assert (len(out_channels_list) == len(kernel_size_list))
        assert (len(out_channels_list) == len(strides_list))
        assert (len(out_channels_list) == len(padding_list))
        # ParametricSequential forwards the extra `index` argument to every PolyConv.
        self.conv_list = ParametricSequential(name="conv_list")
        for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                out_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.add(PolyConv(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                num_blocks=num_blocks,
                data_format=data_format,
                name="conv{}".format(i + 1)))
            # Output width of this layer feeds the next one.
            in_channels = out_channels

    def call(self, x, index, training=None):
        # `index` selects which (non-shared) BatchNorm set to use inside each PolyConv.
        x = self.conv_list(x, index=index, training=training)
        return x
class TwoWayABlock(nn.Layer):
    """
    PolyNet type Inception-A block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TwoWayABlock, self).__init__(**kwargs)
        in_channels = 384
        # Three parallel branches whose outputs are concatenated channel-wise.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(32, 48, 64),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            data_format=data_format,
            name="branch1"))
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(32, 32),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            data_format=data_format,
            name="branch2"))
        self.branches.add(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=32,
            data_format=data_format,
            name="branch3"))
        # 128 = 64 + 32 + 32 channels after concatenation; the final linear
        # (no-activation) 1x1 conv projects back to the block's input width.
        self.conv = conv1x1_block(
            in_channels=128,
            out_channels=in_channels,
            activation=None,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        return x
class TwoWayBBlock(nn.Layer):
    """
    PolyNet type Inception-B block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TwoWayBBlock, self).__init__(**kwargs)
        in_channels = 1152
        # Two parallel branches; branch1 factorizes 7x7 into 1x7 + 7x1 convs.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(128, 160, 192),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            data_format=data_format,
            name="branch1"))
        self.branches.add(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=192,
            data_format=data_format,
            name="branch2"))
        # 384 = 192 + 192 channels after concatenation; linear 1x1 projection
        # restores the block's input width.
        self.conv = conv1x1_block(
            in_channels=384,
            out_channels=in_channels,
            activation=None,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        return x
class TwoWayCBlock(nn.Layer):
    """
    PolyNet type Inception-C block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TwoWayCBlock, self).__init__(**kwargs)
        in_channels = 2048
        # Two parallel branches; branch1 factorizes 3x3 into 1x3 + 3x1 convs.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, (1, 3), (3, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 1), (1, 0)),
            data_format=data_format,
            name="branch1"))
        self.branches.add(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=192,
            data_format=data_format,
            name="branch2"))
        # 448 = 256 + 192 channels after concatenation; linear 1x1 projection
        # restores the block's input width.
        self.conv = conv1x1_block(
            in_channels=448,
            out_channels=in_channels,
            activation=None,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        return x
class PolyPreBBlock(nn.Layer):
    """
    PolyNet type PolyResidual-Pre-B block.

    Parameters:
    ----------
    num_blocks : int
        Number of blocks (BatchNorm layers).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
        super(PolyPreBBlock, self).__init__(**kwargs)
        in_channels = 1152
        # ParametricConcurrent forwards `index` to each branch so the shared
        # conv weights pair with per-iteration BatchNorm sets.
        self.branches = ParametricConcurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(PolyConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(128, 160, 192),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            num_blocks=num_blocks,
            data_format=data_format,
            name="branch1"))
        self.branches.add(poly_conv1x1(
            in_channels=in_channels,
            out_channels=192,
            num_blocks=num_blocks,
            data_format=data_format,
            name="branch2"))

    def call(self, x, index, training=None):
        # Output: 192 + 192 = 384 channels (consumed by poly_res_b_block).
        x = self.branches(x, index=index, training=training)
        return x
class PolyPreCBlock(nn.Layer):
    """
    PolyNet type PolyResidual-Pre-C block.

    Parameters:
    ----------
    num_blocks : int
        Number of blocks (BatchNorm layers).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
        super(PolyPreCBlock, self).__init__(**kwargs)
        in_channels = 2048
        # ParametricConcurrent forwards `index` to each branch so the shared
        # conv weights pair with per-iteration BatchNorm sets.
        self.branches = ParametricConcurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(PolyConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, (1, 3), (3, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 1), (1, 0)),
            num_blocks=num_blocks,
            data_format=data_format,
            name="branch1"))
        self.branches.add(poly_conv1x1(
            in_channels=in_channels,
            out_channels=192,
            num_blocks=num_blocks,
            data_format=data_format,
            name="branch2"))

    def call(self, x, index, training=None):
        # Output: 256 + 192 = 448 channels (consumed by poly_res_c_block).
        x = self.branches(x, index=index, training=training)
        return x
def poly_res_b_block(data_format="channels_last",
                     **kwargs):
    """
    PolyNet type PolyResidual-Res-B block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Linear (no activation) 1x1 projection: 384 -> 1152 channels.
    block = conv1x1_block(
        in_channels=384,
        out_channels=1152,
        strides=1,
        activation=None,
        data_format=data_format,
        **kwargs)
    return block
def poly_res_c_block(data_format="channels_last",
                     **kwargs):
    """
    PolyNet type PolyResidual-Res-C block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Linear (no activation) 1x1 projection: 448 -> 2048 channels.
    block = conv1x1_block(
        in_channels=448,
        out_channels=2048,
        strides=1,
        activation=None,
        data_format=data_format,
        **kwargs)
    return block
class MultiResidual(nn.Layer):
    """
    Base class for constructing N-way modules (2-way, 3-way, and so on). Actually it is for 2-way modules.

    Parameters:
    ----------
    scale : float, default 1.0
        Scale value for each residual branch.
    res_block : HybridBlock class
        Residual branch block.
    num_blocks : int
        Number of residual branches.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 scale,
                 res_block,
                 num_blocks,
                 data_format="channels_last",
                 **kwargs):
        super(MultiResidual, self).__init__(**kwargs)
        assert (num_blocks >= 1)
        self.scale = scale
        self.num_blocks = num_blocks
        # Independent residual branches (kept in a plain list; Keras tracks them).
        self.res_blocks = [res_block(
            data_format=data_format,
            name="res_block{}".format(i + 1)) for i in range(num_blocks)]
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        out = x
        # Each branch sees the SAME input x (branches are parallel, not
        # chained); their scaled outputs are summed onto the identity path.
        for res_block in self.res_blocks:
            out = out + self.scale * res_block(x, training=training)
        out = self.activ(out)
        return out
class PolyResidual(nn.Layer):
    """
    The other base class for constructing N-way poly-modules. Actually it is for 3-way poly-modules.

    Parameters:
    ----------
    scale : float, default 1.0
        Scale value for each residual branch.
    res_block : HybridBlock class
        Residual branch block.
    num_blocks : int
        Number of residual branches.
    pre_block : HybridBlock class
        Preliminary block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 scale,
                 res_block,
                 num_blocks,
                 pre_block,
                 data_format="channels_last",
                 **kwargs):
        super(PolyResidual, self).__init__(**kwargs)
        assert (num_blocks >= 1)
        self.scale = scale
        # Preliminary block shared across iterations; `index` selects its
        # per-iteration BatchNorm set.
        self.pre_block = pre_block(
            num_blocks=num_blocks,
            data_format=data_format,
            name="pre_block")
        self.res_blocks = [res_block(
            data_format=data_format,
            name="res_block{}".format(i + 1)) for i in range(num_blocks)]
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        out = x
        # Unlike MultiResidual, iterations are CHAINED: each one consumes the
        # activated output of the previous iteration while accumulating the
        # scaled contributions onto the identity path `out`.
        for index, res_block in enumerate(self.res_blocks):
            x = self.pre_block(x, index, training=training)
            x = res_block(x, training=training)
            out = out + self.scale * x
            x = self.activ(x)
        out = self.activ(out)
        return out
class PolyBaseUnit(nn.Layer):
    """
    PolyNet unit base class.

    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    two_way_block : HybridBlock class
        Residual branch block for 2-way-stage.
    poly_scale : float, default 0.0
        Scale value for 2-way stage.
    poly_res_block : HybridBlock class, default None
        Residual branch block for poly-stage.
    poly_pre_block : HybridBlock class, default None
        Preliminary branch block for poly-stage.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 two_way_scale,
                 two_way_block,
                 poly_scale=0.0,
                 poly_res_block=None,
                 poly_pre_block=None,
                 data_format="channels_last",
                 **kwargs):
        super(PolyBaseUnit, self).__init__(**kwargs)
        # The poly (3-way) stage is optional: either all three poly args are
        # provided together, or none of them is (enforced by the asserts).
        if poly_res_block is not None:
            assert (poly_scale != 0.0)
            assert (poly_pre_block is not None)
            self.poly = PolyResidual(
                scale=poly_scale,
                res_block=poly_res_block,
                num_blocks=3,
                pre_block=poly_pre_block,
                data_format=data_format,
                name="poly")
        else:
            assert (poly_scale == 0.0)
            assert (poly_pre_block is None)
            self.poly = None
        # The 2-way stage is always present.
        self.twoway = MultiResidual(
            scale=two_way_scale,
            res_block=two_way_block,
            num_blocks=2,
            data_format=data_format,
            name="twoway")

    def call(self, x, training=None):
        if self.poly is not None:
            x = self.poly(x, training=training)
        x = self.twoway(x, training=training)
        return x
class PolyAUnit(PolyBaseUnit):
    """
    PolyNet type A unit.

    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float, default 0.0
        Scale value for the poly stage (must stay 0.0: A units have no poly stage).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale=0.0,
                 data_format="channels_last",
                 **kwargs):
        super(PolyAUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayABlock,
            data_format=data_format,
            **kwargs)
        # Type-A units never use a poly (3-way) stage.
        assert (poly_scale == 0.0)
class PolyBUnit(PolyBaseUnit):
    """
    PolyNet type B unit.

    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float
        Scale value for the poly (3-way) stage.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale,
                 data_format="channels_last",
                 **kwargs):
        # B units combine a 3-way poly stage with a 2-way Inception-B stage.
        super(PolyBUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayBBlock,
            poly_scale=poly_scale,
            poly_res_block=poly_res_b_block,
            poly_pre_block=PolyPreBBlock,
            data_format=data_format,
            **kwargs)
class PolyCUnit(PolyBaseUnit):
    """
    PolyNet type C unit.

    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float
        Scale value for the poly (3-way) stage.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale,
                 data_format="channels_last",
                 **kwargs):
        # C units combine a 3-way poly stage with a 2-way Inception-C stage.
        super(PolyCUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayCBlock,
            poly_scale=poly_scale,
            poly_res_block=poly_res_c_block,
            poly_pre_block=PolyPreCBlock,
            data_format=data_format,
            **kwargs)
class ReductionAUnit(nn.Layer):
    """
    PolyNet type Reduction-A unit.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        in_channels = 384
        # Three parallel downsampling branches (stride-2 convs + max-pool),
        # concatenated channel-wise.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256, 384),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            data_format=data_format,
            name="branch1"))
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            data_format=data_format,
            name="branch2"))
        self.branches.add(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class ReductionBUnit(nn.Layer):
    """
    PolyNet type Reduction-B unit.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        in_channels = 1152
        # Four parallel downsampling branches (stride-2 convs + max-pool),
        # concatenated channel-wise.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256, 256),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            data_format=data_format,
            name="branch1"))
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            data_format=data_format,
            name="branch2"))
        self.branches.add(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 384),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            data_format=data_format,
            name="branch3"))
        self.branches.add(MaxPoolBranch(
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class PolyBlock3a(nn.Layer):
    """
    PolyNet type Mixed-3a block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(PolyBlock3a, self).__init__(**kwargs)
        # Two parallel downsampling paths whose outputs are concatenated.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(MaxPoolBranch(
            data_format=data_format,
            name="branch1"))
        self.branches.add(Conv3x3Branch(
            in_channels=64,
            out_channels=96,
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        return self.branches(x, training=training)
class PolyBlock4a(nn.Layer):
    """
    PolyNet type Mixed-4a block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(PolyBlock4a, self).__init__(**kwargs)
        # Two parallel conv branches; branch2 factorizes 7x7 into 7x1 + 1x7.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 96),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 0),
            data_format=data_format,
            name="branch1"))
        self.branches.add(ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 64, 64, 96),
            kernel_size_list=(1, (7, 1), (1, 7), 3),
            strides_list=(1, 1, 1, 1),
            padding_list=(0, (3, 0), (0, 3), 0),
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class PolyBlock5a(nn.Layer):
    """
    PolyNet type Mixed-5a block.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(PolyBlock5a, self).__init__(**kwargs)
        # Two parallel downsampling paths whose outputs are concatenated.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(MaxPoolBranch(
            data_format=data_format,
            name="branch1"))
        self.branches.add(Conv3x3Branch(
            in_channels=192,
            out_channels=192,
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        return self.branches(x, training=training)
class PolyInitBlock(nn.Layer):
    """
    PolyNet specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PolyInitBlock, self).__init__(**kwargs)
        # Stem: three 3x3 convs (the first one downsamples), followed by three
        # mixed blocks that alternate pooling/conv downsampling.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            padding=0,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            data_format=data_format,
            name="conv3")
        self.block1 = PolyBlock3a(
            data_format=data_format,
            name="block1")
        self.block2 = PolyBlock4a(
            data_format=data_format,
            name="block2")
        self.block3 = PolyBlock5a(
            data_format=data_format,
            name="block3")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.block1(x, training=training)
        x = self.block2(x, training=training)
        x = self.block3(x, training=training)
        return x
class PolyNet(tf.keras.Model):
    """
    PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
    https://arxiv.org/abs/1611.05725.

    Parameters:
    ----------
    two_way_scales : list of list of floats
        Two way scale values for each normal unit.
    poly_scales : list of list of floats
        Three way scale values for each normal unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 two_way_scales,
                 poly_scales,
                 dropout_rate=0.2,
                 in_channels=3,
                 in_size=(331, 331),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PolyNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        normal_units = [PolyAUnit, PolyBUnit, PolyCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = SimpleSequential(name="features")
        self.features.add(PolyInitBlock(
            in_channels=in_channels,
            data_format=data_format,
            name="init_block"))
        for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, (two_way_scale, poly_scale) in enumerate(zip(two_way_scales_per_stage, poly_scales_per_stage)):
                if (j == 0) and (i != 0):
                    # The first unit of every stage except the first is a
                    # (scale-less) reduction unit that halves the resolution.
                    unit = reduction_units[i - 1]
                    stage.add(unit(
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    unit = normal_units[i]
                    stage.add(unit(
                        two_way_scale=two_way_scale,
                        poly_scale=poly_scale,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=9,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=2048,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        # Fix: propagate `training` to the classifier head so the Dropout
        # layer is explicitly enabled during training and disabled at
        # inference (previously the flag was dropped at this call site,
        # inconsistently with the `features` call above).
        x = self.output1(x, training=training)
        return x
def get_polynet(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create PolyNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-unit residual scales, linearly decayed over depth:
    # stage 1: 10 A-units; stage 2: reduction + 10 B-units; stage 3: reduction + 5 C-units
    # (the leading 0.0 entries correspond to the reduction units, which take no scales).
    two_way_scales = [
        [1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769],
        [0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923],
        [0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]]
    poly_scales = [
        [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
        [0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615],
        [0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]]

    net = PolyNet(
        two_way_scales=two_way_scales,
        poly_scales=poly_scales,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the network (create its variables) before loading weights.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def polynet(**kwargs):
    """
    PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
    https://arxiv.org/abs/1611.05725.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_polynet(
        model_name="polynet",
        **kwargs)
def _test():
    """Smoke-test: build the model, run a forward pass, check output shape and weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model in [polynet]:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        in_shape = (batch, 3, 331, 331) if is_channels_first(data_format) else (batch, 331, 331, 3)
        x = tf.random.normal(in_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != polynet or weight_count == 95366600)
| 37,828 | 30.576795 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnet_cifar.py | """
ResNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn',
'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn',
'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn',
'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn',
'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn',
'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn',
'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn',
'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first
from .resnet import ResUnit
class CIFARResNet(tf.keras.Model):
    """
    ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        # CIFAR stem: a single 3x3 conv (no 7x7 conv / max-pool as in ImageNet ResNet).
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of each stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_resnet_cifar(classes,
                     blocks,
                     bottleneck,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Create ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Derive per-stage unit counts from the total depth: 3 stages, each unit
    # contributing 3 (bottleneck) or 2 (basic) conv layers, plus 2 layers
    # outside the stages.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if bottleneck:
        # Bottleneck units expand the output width by a factor of 4.
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the network (create its variables) before loading weights.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def resnet20_cifar10(classes=10, **kwargs):
    """
    ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resnet20_cifar10",
        **kwargs)
def resnet20_cifar100(classes=100, **kwargs):
    """
    ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resnet20_cifar100",
        **kwargs)
def resnet20_svhn(classes=10, **kwargs):
    """
    Build the ResNet-20 model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resnet20_svhn",
        **kwargs)
def resnet56_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-56 model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="resnet56_cifar10",
        **kwargs)
def resnet56_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-56 model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="resnet56_cifar100",
        **kwargs)
def resnet56_svhn(classes=10, **kwargs):
    """
    Build the ResNet-56 model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="resnet56_svhn",
        **kwargs)
def resnet110_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-110 model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="resnet110_cifar10",
        **kwargs)
def resnet110_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-110 model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="resnet110_cifar100",
        **kwargs)
def resnet110_svhn(classes=10, **kwargs):
    """
    Build the ResNet-110 model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="resnet110_svhn",
        **kwargs)
def resnet164bn_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-164(BN) model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="resnet164bn_cifar10",
        **kwargs)
def resnet164bn_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-164(BN) model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="resnet164bn_cifar100",
        **kwargs)
def resnet164bn_svhn(classes=10, **kwargs):
    """
    Build the ResNet-164(BN) model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="resnet164bn_svhn",
        **kwargs)
def resnet272bn_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-272(BN) model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="resnet272bn_cifar10",
        **kwargs)
def resnet272bn_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-272(BN) model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="resnet272bn_cifar100",
        **kwargs)
def resnet272bn_svhn(classes=10, **kwargs):
    """
    Build the ResNet-272(BN) model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="resnet272bn_svhn",
        **kwargs)
def resnet542bn_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-542(BN) model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="resnet542bn_cifar10",
        **kwargs)
def resnet542bn_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-542(BN) model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="resnet542bn_cifar100",
        **kwargs)
def resnet542bn_svhn(classes=10, **kwargs):
    """
    ResNet-542(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed docstring: it previously described this as a ResNet-272(BN) model.
    return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs)
def resnet1001_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-1001 model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="resnet1001_cifar10",
        **kwargs)
def resnet1001_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-1001 model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="resnet1001_cifar100",
        **kwargs)
def resnet1001_svhn(classes=10, **kwargs):
    """
    Build the ResNet-1001 model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="resnet1001_svhn",
        **kwargs)
def resnet1202_cifar10(classes=10, **kwargs):
    """
    Build the ResNet-1202 model for CIFAR-10 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="resnet1202_cifar10",
        **kwargs)
def resnet1202_cifar100(classes=100, **kwargs):
    """
    Build the ResNet-1202 model for CIFAR-100 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="resnet1202_cifar100",
        **kwargs)
def resnet1202_svhn(classes=10, **kwargs):
    """
    Build the ResNet-1202 model for SVHN ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.
    """
    return get_resnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="resnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test every CIFAR/SVHN ResNet variant: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Each entry: (model constructor, class count, expected trainable weight count).
    models = [
        (resnet20_cifar10, 10, 272474),
        (resnet20_cifar100, 100, 278324),
        (resnet20_svhn, 10, 272474),
        (resnet56_cifar10, 10, 855770),
        (resnet56_cifar100, 100, 861620),
        (resnet56_svhn, 10, 855770),
        (resnet110_cifar10, 10, 1730714),
        (resnet110_cifar100, 100, 1736564),
        (resnet110_svhn, 10, 1730714),
        (resnet164bn_cifar10, 10, 1704154),
        (resnet164bn_cifar100, 100, 1727284),
        (resnet164bn_svhn, 10, 1704154),
        (resnet272bn_cifar10, 10, 2816986),
        (resnet272bn_cifar100, 100, 2840116),
        (resnet272bn_svhn, 10, 2816986),
        (resnet542bn_cifar10, 10, 5599066),
        (resnet542bn_cifar100, 100, 5622196),
        (resnet542bn_svhn, 10, 5599066),
        (resnet1001_cifar10, 10, 10328602),
        (resnet1001_cifar100, 100, 10351732),
        (resnet1001_svhn, 10, 10328602),
        (resnet1202_cifar10, 10, 19424026),
        (resnet1202_cifar100, 100, 19429876),
        (resnet1202_svhn, 10, 19424026),
    ]

    for model, classes, expected_count in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, classes))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 23,420 | 35.883465 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/nasnet.py | """
NASNet-A for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Learning Transferable Architectures for Scalable Image Recognition,'
https://arxiv.org/abs/1707.07012.
"""
__all__ = ['NASNet', 'nasnet_4a1056', 'nasnet_6a4032', 'nasnet_dual_path_sequential']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, AvgPool2d, BatchNorm, Conv2d, conv1x1, DualPathSequential, SimpleSequential, flatten,\
is_channels_first, get_channel_axis
class NasDualPathScheme(object):
    """
    NASNet-specific dual-path response scheme for blocks inside a DualPathSequential module.

    Parameters:
    ----------
    can_skip_input : bool
        Whether some blocks may replace the current tensor with the previous one.
    """
    def __init__(self,
                 can_skip_input):
        super(NasDualPathScheme, self).__init__()
        self.can_skip_input = can_skip_input

    def __call__(self,
                 block,
                 x,
                 x_prev,
                 training):
        """
        Apply the scheme to one block.

        Parameters:
        ----------
        block : nn.HybridBlock
            The block to execute.
        x : Tensor
            Current processed tensor.
        x_prev : Tensor
            Previous processed tensor.
        training : bool or None
            Whether to work in training mode or in inference mode.

        Returns:
        -------
        x_next : Tensor
            Next processed tensor.
        x : Tensor
            Current processed tensor.
        """
        out = block(x, x_prev, training=training)
        # A block may return either a single tensor or a (next, current) pair.
        if type(out) is tuple:
            out, x = out
        # Blocks flagged with `skip_input` forward the previous tensor instead.
        if self.can_skip_input and getattr(block, "skip_input", False):
            x = x_prev
        return out, x
def nasnet_dual_path_scheme_ordinal(block,
                                    x,
                                    _,
                                    training):
    """
    NASNet-specific dual-path response scheme for an ordinal block (single input/output) inside a
    DualPathSequential block.

    Parameters:
    ----------
    block : nn.HybridBlock
        The block to execute.
    x : Tensor
        Current processed tensor.
    training : bool or None
        Whether to work in training mode or in inference mode.

    Returns:
    -------
    x_next : Tensor
        Next processed tensor.
    x : Tensor
        Current (unchanged) tensor.
    """
    x_next = block(x, training=training)
    return x_next, x
def nasnet_dual_path_sequential(return_two=True,
                                first_ordinals=0,
                                last_ordinals=0,
                                can_skip_input=False,
                                **kwargs):
    """
    NASNet-specific dual-path sequential container.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two outputs after execution.
    first_ordinals : int, default 0
        Number of leading blocks with single input/output.
    last_ordinals : int, default 0
        Number of trailing blocks with single input/output.
    can_skip_input : bool, default False
        Whether some blocks may replace the current tensor with the previous one.
    """
    scheme = NasDualPathScheme(can_skip_input=can_skip_input)
    return DualPathSequential(
        return_two=return_two,
        first_ordinals=first_ordinals,
        last_ordinals=last_ordinals,
        dual_path_scheme=scheme,
        dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal,
        **kwargs)
def nasnet_batch_norm(channels,
                      data_format="channels_last",
                      **kwargs):
    """
    NASNet-specific batch normalization layer.

    Parameters:
    ----------
    channels : int
        Number of channels in input data.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    assert (channels is not None)
    # NASNet uses non-default momentum/epsilon values for BatchNorm.
    return BatchNorm(
        momentum=0.1,
        epsilon=0.001,
        data_format=data_format,
        **kwargs)
def nasnet_avgpool1x1_s2(data_format="channels_last",
                         **kwargs):
    """
    NASNet-specific 1x1 average pooling layer with stride 2.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # NOTE: the reference implementation uses count_include_pad=False.
    return AvgPool2d(
        pool_size=1,
        strides=2,
        data_format=data_format,
        **kwargs)
def nasnet_avgpool3x3_s1(data_format="channels_last",
                         **kwargs):
    """
    NASNet-specific 3x3 average pooling layer with stride 1.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # NOTE: the reference implementation uses count_include_pad=False.
    return AvgPool2d(
        pool_size=3,
        strides=1,
        padding=1,
        data_format=data_format,
        **kwargs)
def nasnet_avgpool3x3_s2(data_format="channels_last",
                         **kwargs):
    """
    NASNet-specific 3x3 average pooling layer with stride 2.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # NOTE: the reference implementation uses count_include_pad=False.
    return AvgPool2d(
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        **kwargs)
class NasMaxPoolBlock(nn.Layer):
    """
    NASNet-specific 3x3/s2 max pooling layer, optionally with an extra one-pixel padding.

    Parameters:
    ----------
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 extra_padding=False,
                 data_format="channels_last",
                 **kwargs):
        super(NasMaxPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        self.data_format = data_format

        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
        if self.extra_padding:
            # One extra pixel on the top/left; it is cropped away again after pooling.
            self.pad = nn.ZeroPadding2D(
                padding=((1, 0), (1, 0)),
                data_format=data_format)

    def call(self, x, training=None):
        if self.extra_padding:
            x = self.pad(x)
        x = self.pool(x)
        if self.extra_padding:
            # Drop the first row/column introduced by the extra padding.
            x = x[:, :, 1:, 1:] if is_channels_first(self.data_format) else x[:, 1:, 1:, :]
        return x
class NasAvgPoolBlock(nn.Layer):
    """
    NASNet-specific 3x3/s2 average pooling layer, optionally with an extra one-pixel padding.

    Parameters:
    ----------
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 extra_padding=False,
                 data_format="channels_last",
                 **kwargs):
        super(NasAvgPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        self.data_format = data_format

        # NOTE: the reference implementation uses count_include_pad=False.
        self.pool = AvgPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
        if self.extra_padding:
            # One extra pixel on the top/left; it is cropped away again after pooling.
            self.pad = nn.ZeroPadding2D(
                padding=((1, 0), (1, 0)),
                data_format=data_format)

    def call(self, x, training=None):
        if self.extra_padding:
            x = self.pad(x)
        x = self.pool(x)
        if self.extra_padding:
            # Drop the first row/column introduced by the extra padding.
            x = x[:, :, 1:, 1:] if is_channels_first(self.data_format) else x[:, 1:, 1:, :]
        return x
class NasConv(nn.Layer):
    """
    NASNet-specific convolution block: ReLU -> Conv -> BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups,
                 data_format="channels_last",
                 **kwargs):
        super(NasConv, self).__init__(**kwargs)
        self.activ = nn.ReLU()
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=groups,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = nasnet_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")

    def call(self, x, training=None):
        # Pre-activation ordering: ReLU precedes the convolution.
        return self.bn(self.conv(self.activ(x)), training=training)
def nas_conv1x1(in_channels,
                out_channels,
                data_format="channels_last",
                **kwargs):
    """
    1x1 variant of the NASNet-specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Pointwise convolution: unit kernel and stride, no padding, single group.
    return NasConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=1,
        padding=0,
        groups=1,
        data_format=data_format,
        **kwargs)
class DwsConv(nn.Layer):
    """
    Standard depthwise separable convolution block (depthwise conv followed by pointwise conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layers use a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_bias=False,
                 data_format="channels_last",
                 **kwargs):
        super(DwsConv, self).__init__(**kwargs)
        # Depthwise stage: one group per input channel.
        self.dw_conv = Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=in_channels,
            use_bias=use_bias,
            data_format=data_format,
            name="dw_conv")
        # Pointwise stage: mixes channels with a 1x1 convolution.
        self.pw_conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            data_format=data_format,
            name="pw_conv")

    def call(self, x, training=None):
        return self.pw_conv(self.dw_conv(x, training=training), training=training)
class NasDwsConv(nn.Layer):
    """
    NASNet-specific depthwise separable convolution block: ReLU -> DwsConv -> BatchNorm,
    optionally with an extra one-pixel padding around the convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 extra_padding=False,
                 data_format="channels_last",
                 **kwargs):
        super(NasDwsConv, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        self.data_format = data_format

        self.activ = nn.ReLU()
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = nasnet_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")
        if self.extra_padding:
            # One extra pixel on the top/left; it is cropped away again after the convolution.
            self.pad = nn.ZeroPadding2D(
                padding=((1, 0), (1, 0)),
                data_format=data_format)

    def call(self, x, training=None):
        x = self.activ(x)
        if self.extra_padding:
            x = self.pad(x)
        x = self.conv(x, training=training)
        if self.extra_padding:
            # Drop the first row/column introduced by the extra padding.
            x = x[:, :, 1:, 1:] if is_channels_first(self.data_format) else x[:, 1:, 1:, :]
        return self.bn(x, training=training)
class DwsBranch(nn.Layer):
    """
    NASNet-specific branch of two stacked depthwise separable convolution blocks; only the
    first one may be strided/extra-padded.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    extra_padding : bool, default False
        Whether to use the extra padding.
    stem : bool, default False
        Whether the branch is used in the stem (changes the intermediate channel count).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 extra_padding=False,
                 stem=False,
                 data_format="channels_last",
                 **kwargs):
        super(DwsBranch, self).__init__(**kwargs)
        # Extra padding is never combined with stem usage.
        assert (not stem) or (not extra_padding)
        mid_channels = out_channels if stem else in_channels

        self.conv1 = NasDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            extra_padding=extra_padding,
            data_format=data_format,
            name="conv1")
        self.conv2 = NasDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=1,
            padding=padding,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x, training=training), training=training)
def dws_branch_k3_s1_p1(in_channels,
                        out_channels,
                        extra_padding=False,
                        data_format="channels_last",
                        **kwargs):
    """
    3x3 kernel / stride 1 / padding 1 variant of the NASNet-specific depthwise separable
    convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=1,
        padding=1,
        extra_padding=extra_padding,
        data_format=data_format,
        **kwargs)
def dws_branch_k5_s1_p2(in_channels,
                        out_channels,
                        extra_padding=False,
                        data_format="channels_last",
                        **kwargs):
    """
    5x5 kernel / stride 1 / padding 2 variant of the NASNet-specific depthwise separable
    convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=1,
        padding=2,
        extra_padding=extra_padding,
        data_format=data_format,
        **kwargs)
def dws_branch_k5_s2_p2(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False,
                        data_format="channels_last",
                        **kwargs):
    """
    5x5 kernel / stride 2 / padding 2 variant of the NASNet-specific depthwise separable
    convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use the extra padding.
    stem : bool, default False
        Whether the branch is used in the stem (changes the intermediate channel count).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=2,
        padding=2,
        extra_padding=extra_padding,
        stem=stem,
        data_format=data_format,
        **kwargs)
def dws_branch_k7_s2_p3(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False,
                        data_format="channels_last",
                        **kwargs):
    """
    7x7 kernel / stride 2 / padding 3 variant of the NASNet-specific depthwise separable
    convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use the extra padding.
    stem : bool, default False
        Whether the branch is used in the stem (changes the intermediate channel count).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=2,
        padding=3,
        extra_padding=extra_padding,
        stem=stem,
        data_format=data_format,
        **kwargs)
class NasPathBranch(nn.Layer):
    """
    NASNet-specific `path` branch (auxiliary block): strided 1x1 average pooling followed by
    a 1x1 convolution, optionally with a one-pixel spatial shift beforehand.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use the extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 extra_padding=False,
                 data_format="channels_last",
                 **kwargs):
        super(NasPathBranch, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        self.data_format = data_format

        self.avgpool = nasnet_avgpool1x1_s2(
            data_format=data_format,
            name="")
        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="")
        if self.extra_padding:
            self.pad = nn.ZeroPadding2D(
                padding=((0, 1), (0, 1)),
                data_format=data_format)

    def call(self, x, training=None):
        if self.extra_padding:
            # Shift the feature map by one pixel: pad bottom/right, then drop the top/left.
            x = self.pad(x)
            x = x[:, :, 1:, 1:] if is_channels_first(self.data_format) else x[:, 1:, 1:, :]
        x = self.avgpool(x)
        return self.conv(x, training=training)
class NasPathBlock(nn.Layer):
    """
    NASNet-specific `path` block: ReLU, two parallel path branches (the second one shifted),
    channel concatenation, then batch normalization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(NasPathBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # Each branch produces half of the output channels.
        mid_channels = out_channels // 2

        self.activ = nn.ReLU()
        self.path1 = NasPathBranch(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="path1")
        self.path2 = NasPathBranch(
            in_channels=in_channels,
            out_channels=mid_channels,
            extra_padding=True,
            data_format=data_format,
            name="path2")
        self.bn = nasnet_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")

    def call(self, x, training=None):
        x = self.activ(x)
        y1 = self.path1(x, training=training)
        y2 = self.path2(x, training=training)
        y = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        return self.bn(y, training=training)
class Stem1Unit(nn.Layer):
    """
    NASNet Stem1 unit.

    First stem cell. It has no genuine "previous" input: the raw input `x`
    itself serves as the right-hand path, and the second positional argument
    of `call` is ignored (it exists only to match the dual-path unit
    interface).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(Stem1Unit, self).__init__(**kwargs)
        self.data_format = data_format
        # Four combination outputs are concatenated, so each branch yields
        # a quarter of the output channels.
        mid_channels = out_channels // 4
        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1")
        self.comb0_left = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = dws_branch_k7_s2_p3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = NasMaxPoolBlock(
            extra_padding=False,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = dws_branch_k7_s2_p3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = nasnet_avgpool3x3_s2(
            data_format=data_format,
            name="comb2_left")
        self.comb2_right = dws_branch_k5_s2_p2(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True,
            data_format=data_format,
            name="comb2_right")
        self.comb3_right = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb4_left")
        self.comb4_right = NasMaxPoolBlock(
            extra_padding=False,
            data_format=data_format,
            name="comb4_right")
    def call(self, x, _=None, training=None):
        """
        Forward pass. The `_` argument keeps the (`x`, `x_prev`) dual-path
        unit interface and is never used.
        """
        x_left = self.conv1x1(x, training=training)
        x_right = x
        # Pairwise branch combinations; x3 and x4 reuse earlier combinations.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training)
        x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training)
        x3 = x1 + self.comb3_right(x0, training=training)
        x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, training=training)
        x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class Stem2Unit(nn.Layer):
    """
    NASNet Stem2 unit.

    Second stem cell: a reduction-style cell whose "previous" input is first
    adapted by a `NasPathBlock` before being combined with the main input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool
        Whether to use extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding,
                 data_format="channels_last",
                 **kwargs):
        super(Stem2Unit, self).__init__(**kwargs)
        self.data_format = data_format
        # Four combination outputs are concatenated -> out_channels // 4 each.
        mid_channels = out_channels // 4
        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1")
        self.path = NasPathBlock(
            in_channels=prev_in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="path")
        self.comb0_left = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = dws_branch_k7_s2_p3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = NasMaxPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = dws_branch_k7_s2_p3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = NasAvgPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_left")
        self.comb2_right = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_right")
        self.comb3_right = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb4_left")
        self.comb4_right = NasMaxPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb4_right")
    def call(self, x, x_prev, training=None):
        """
        Forward pass over the current (`x`) and previous (`x_prev`) inputs.
        """
        x_left = self.conv1x1(x, training=training)
        x_right = self.path(x_prev, training=training)
        # Pairwise branch combinations; x3 and x4 reuse earlier combinations.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training)
        x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training)
        x3 = x1 + self.comb3_right(x0, training=training)
        x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, training=training)
        x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class FirstUnit(nn.Layer):
    """
    NASNet First unit.

    Normal (stride-1) cell used right after a resolution change. The previous
    input is adapted by a `NasPathBlock`; the adapted tensor is also included
    directly in the output concatenation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(FirstUnit, self).__init__(**kwargs)
        self.data_format = data_format
        # Six tensors are concatenated in `call` -> out_channels // 6 each.
        mid_channels = out_channels // 6
        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1")
        self.path = NasPathBlock(
            in_channels=prev_in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="path")
        self.comb0_left = dws_branch_k5_s1_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = dws_branch_k5_s1_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb2_left")
        self.comb3_left = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_left")
        self.comb3_right = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb4_left")
    def call(self, x, x_prev, training=None):
        """
        Forward pass over the current (`x`) and previous (`x_prev`) inputs.
        """
        x_left = self.conv1x1(x, training=training)
        x_right = self.path(x_prev, training=training)
        # Pairwise branch combinations of the NASNet-A normal cell; note
        # x2 and x4 add a path tensor directly without a right-hand branch.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training)
        x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_left, training=training) + x_right
        x3 = self.comb3_left(x_right, training=training) + self.comb3_right(x_right, training=training)
        x4 = self.comb4_left(x_left, training=training) + x_left
        x_out = tf.concat([x_right, x0, x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class NormalUnit(nn.Layer):
    """
    NASNet Normal unit.

    Regular normal (stride-1) cell. Unlike `FirstUnit`, the previous input is
    adapted by a plain 1x1 convolution instead of a `NasPathBlock`; the cell
    wiring in `call` is otherwise identical.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(NormalUnit, self).__init__(**kwargs)
        self.data_format = data_format
        # Six tensors are concatenated in `call` -> out_channels // 6 each.
        mid_channels = out_channels // 6
        self.conv1x1_prev = nas_conv1x1(
            in_channels=prev_in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1_prev")
        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1")
        self.comb0_left = dws_branch_k5_s1_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = dws_branch_k5_s1_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb2_left")
        self.comb3_left = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_left")
        self.comb3_right = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb4_left")
    def call(self, x, x_prev, training=None):
        """
        Forward pass over the current (`x`) and previous (`x_prev`) inputs.
        """
        x_left = self.conv1x1(x, training=training)
        x_right = self.conv1x1_prev(x_prev, training=training)
        # Pairwise branch combinations of the NASNet-A normal cell; note
        # x2 and x4 add a path tensor directly without a right-hand branch.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training)
        x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_left, training=training) + x_right
        x3 = self.comb3_left(x_right, training=training) + self.comb3_right(x_right, training=training)
        x4 = self.comb4_left(x_left, training=training) + x_left
        x_out = tf.concat([x_right, x0, x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class ReductionBaseUnit(nn.Layer):
    """
    NASNet Reduction base unit.

    Shared implementation for both reduction cells: both inputs are adapted
    by 1x1 convolutions and then combined through stride-2 branches, halving
    the spatial resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default True
        Whether to use extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding=True,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionBaseUnit, self).__init__(**kwargs)
        self.data_format = data_format
        # Flag presumably read by the dual-path sequential container (see
        # `can_skip_input` in `nasnet_dual_path_sequential`) — TODO confirm.
        self.skip_input = True
        # Four combination outputs are concatenated -> out_channels // 4 each.
        mid_channels = out_channels // 4
        self.conv1x1_prev = nas_conv1x1(
            in_channels=prev_in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1_prev")
        self.conv1x1 = nas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1x1")
        self.comb0_left = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = dws_branch_k7_s2_p3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = NasMaxPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = dws_branch_k7_s2_p3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = NasAvgPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_left")
        self.comb2_right = dws_branch_k5_s2_p2(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_right")
        self.comb3_right = nasnet_avgpool3x3_s1(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3_s1_p1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb4_left")
        self.comb4_right = NasMaxPoolBlock(
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb4_right")
    def call(self, x, x_prev, training=None):
        """
        Forward pass over the current (`x`) and previous (`x_prev`) inputs.
        """
        x_left = self.conv1x1(x, training=training)
        x_right = self.conv1x1_prev(x_prev, training=training)
        # Pairwise branch combinations; x3 and x4 reuse earlier combinations.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_right, training=training)
        x1 = self.comb1_left(x_left, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_left, training=training) + self.comb2_right(x_right, training=training)
        x3 = x1 + self.comb3_right(x0, training=training)
        x4 = self.comb4_left(x0, training=training) + self.comb4_right(x_left, training=training)
        x_out = tf.concat([x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class Reduction1Unit(ReductionBaseUnit):
    """
    NASNet Reduction1 unit.

    A thin specialization of `ReductionBaseUnit` with extra padding always
    enabled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        # This variant unconditionally uses extra padding.
        super().__init__(
            in_channels=in_channels,
            prev_in_channels=prev_in_channels,
            out_channels=out_channels,
            extra_padding=True,
            data_format=data_format,
            **kwargs)
class Reduction2Unit(ReductionBaseUnit):
    """
    NASNet Reduction2 unit.

    A thin specialization of `ReductionBaseUnit` that exposes the extra
    padding setting to the caller.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool
        Whether to use extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(
            in_channels=in_channels,
            prev_in_channels=prev_in_channels,
            out_channels=out_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            **kwargs)
class NASNetInitBlock(nn.Layer):
    """
    NASNet specific initial block.

    A 3x3 stride-2 valid (padding=0) convolution without bias, followed by
    batch normalization. No activation is applied here.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(NASNetInitBlock, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            strides=2,
            padding=0,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = nasnet_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")
    def call(self, x, training=None):
        """
        Forward pass: conv (no training flag needed) then batch norm.
        """
        x = self.conv(x)
        x = self.bn(x, training=training)
        return x
class NASNet(tf.keras.Model):
    """
    NASNet-A model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem_blocks_channels : list of 2 int
        Number of output channels for the Stem units.
    final_pool_size : int
        Size of the pooling windows for final pool.
    extra_padding : bool
        Whether to use extra padding.
    skip_reduction_layer_input : bool
        Whether to skip the reduction layers when calculating the previous layer to connect to.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 stem_blocks_channels,
                 final_pool_size,
                 extra_padding,
                 skip_reduction_layer_input,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(NASNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Reduction unit class used at the start of stage 2 / stage 3.
        reduction_units = [Reduction1Unit, Reduction2Unit]
        # Dual-path container: units receive (x, x_prev); only the first
        # child and the last two run as ordinary single-path modules.
        self.features = nasnet_dual_path_sequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=2,
            name="features")
        self.features.children.append(NASNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        out_channels = stem_blocks_channels[0]
        self.features.children.append(Stem1Unit(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="stem1_unit"))
        prev_in_channels = in_channels
        in_channels = out_channels
        out_channels = stem_blocks_channels[1]
        self.features.children.append(Stem2Unit(
            in_channels=in_channels,
            prev_in_channels=prev_in_channels,
            out_channels=out_channels,
            extra_padding=extra_padding,
            data_format=data_format,
            name="stem2_unit"))
        prev_in_channels = in_channels
        in_channels = out_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nasnet_dual_path_sequential(
                can_skip_input=skip_reduction_layer_input,
                name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Unit selection: stages 2/3 open with a reduction unit;
                # the first normal cell of a stage (after the stem or a
                # reduction) is a FirstUnit; the rest are NormalUnits.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                elif ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)):
                    unit = FirstUnit
                else:
                    unit = NormalUnit
                if unit == Reduction2Unit:
                    # Reduction2Unit is the only unit taking extra_padding.
                    stage.children.append(Reduction2Unit(
                        in_channels=in_channels,
                        prev_in_channels=prev_in_channels,
                        out_channels=out_channels,
                        extra_padding=extra_padding,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    stage.children.append(unit(
                        in_channels=in_channels,
                        prev_in_channels=prev_in_channels,
                        out_channels=out_channels,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                prev_in_channels = in_channels
                in_channels = out_channels
            self.features.children.append(stage)
        self.features.children.append(nn.ReLU(name="activ"))
        self.features.children.append(nn.AveragePooling2D(
            pool_size=final_pool_size,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classifier head: dropout followed by a fully-connected layer.
        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=0.5,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))
    def call(self, x, training=None):
        """
        Forward pass: feature extractor, flatten, classifier head.
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_nasnet(repeat,
               penultimate_filters,
               init_block_channels,
               final_pool_size,
               extra_padding,
               skip_reduction_layer_input,
               in_size,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create NASNet-A model with specific parameters.

    Parameters:
    ----------
    repeat : int
        Number of cell repeats.
    penultimate_filters : int
        Number of filters in the penultimate layer of the network.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_pool_size : int
        Size of the pooling windows for final pool.
    extra_padding : bool
        Whether to use extra padding.
    skip_reduction_layer_input : bool
        Whether to skip the reduction layers when calculating the previous layer to connect to.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Relative (unscaled) channel multipliers for the stem and the stages.
    stem_blocks_channels = [1, 2]
    reduct_channels = [[], [8], [16]]
    norm_channels = [6, 12, 24]
    channels = [rci + [nci] * repeat
                for rci, nci in zip(reduct_channels, norm_channels)]
    # Scale every multiplier so the last unit ends at `penultimate_filters`.
    base_channel_chunk = penultimate_filters // channels[-1][-1]
    stem_blocks_channels = [ci * base_channel_chunk for ci in stem_blocks_channels]
    channels = [[cij * base_channel_chunk for cij in ci] for ci in channels]
    net = NASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        stem_blocks_channels=stem_blocks_channels,
        final_pool_size=final_pool_size,
        extra_padding=extra_padding,
        skip_reduction_layer_input=skip_reduction_layer_input,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build the model once so the weights exist before loading them.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def nasnet_4a1056(**kwargs):
    """
    NASNet-A 4@1056 (NASNet-A-Mobile) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture configuration of the mobile-sized variant.
    config = dict(
        repeat=4,
        penultimate_filters=1056,
        init_block_channels=32,
        final_pool_size=7,
        extra_padding=True,
        skip_reduction_layer_input=False,
        in_size=(224, 224),
        model_name="nasnet_4a1056")
    return get_nasnet(**config, **kwargs)
def nasnet_6a4032(**kwargs):
    """
    NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture configuration of the large variant.
    config = dict(
        repeat=6,
        penultimate_filters=4032,
        init_block_channels=96,
        final_pool_size=11,
        extra_padding=False,
        skip_reduction_layer_input=True,
        in_size=(331, 331),
        model_name="nasnet_6a4032")
    return get_nasnet(**config, **kwargs)
def _test():
    """Smoke-test both NASNet variants and verify their parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    batch = 14
    expected_weight_counts = {
        nasnet_4a1056: 5289978,
        nasnet_6a4032: 88753150,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 331, 331))
        else:
            x = tf.random.normal((batch, 331, 331, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
| 52,300 | 31.047181 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnext_cifar.py | """
ResNeXt for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['CIFARResNeXt', 'resnext20_1x64d_cifar10', 'resnext20_1x64d_cifar100', 'resnext20_1x64d_svhn',
'resnext20_2x32d_cifar10', 'resnext20_2x32d_cifar100', 'resnext20_2x32d_svhn',
'resnext20_2x64d_cifar10', 'resnext20_2x64d_cifar100', 'resnext20_2x64d_svhn',
'resnext20_4x16d_cifar10', 'resnext20_4x16d_cifar100', 'resnext20_4x16d_svhn',
'resnext20_4x32d_cifar10', 'resnext20_4x32d_cifar100', 'resnext20_4x32d_svhn',
'resnext20_8x8d_cifar10', 'resnext20_8x8d_cifar100', 'resnext20_8x8d_svhn',
'resnext20_8x16d_cifar10', 'resnext20_8x16d_cifar100', 'resnext20_8x16d_svhn',
'resnext20_16x4d_cifar10', 'resnext20_16x4d_cifar100', 'resnext20_16x4d_svhn',
'resnext20_16x8d_cifar10', 'resnext20_16x8d_cifar100', 'resnext20_16x8d_svhn',
'resnext20_32x2d_cifar10', 'resnext20_32x2d_cifar100', 'resnext20_32x2d_svhn',
'resnext20_32x4d_cifar10', 'resnext20_32x4d_cifar100', 'resnext20_32x4d_svhn',
'resnext20_64x1d_cifar10', 'resnext20_64x1d_cifar100', 'resnext20_64x1d_svhn',
'resnext20_64x2d_cifar10', 'resnext20_64x2d_cifar100', 'resnext20_64x2d_svhn',
'resnext29_32x4d_cifar10', 'resnext29_32x4d_cifar100', 'resnext29_32x4d_svhn',
'resnext29_16x64d_cifar10', 'resnext29_16x64d_cifar100', 'resnext29_16x64d_svhn',
'resnext56_1x64d_cifar10', 'resnext56_1x64d_cifar100', 'resnext56_1x64d_svhn',
'resnext56_2x32d_cifar10', 'resnext56_2x32d_cifar100', 'resnext56_2x32d_svhn',
'resnext56_4x16d_cifar10', 'resnext56_4x16d_cifar100', 'resnext56_4x16d_svhn',
'resnext56_8x8d_cifar10', 'resnext56_8x8d_cifar100', 'resnext56_8x8d_svhn',
'resnext56_16x4d_cifar10', 'resnext56_16x4d_cifar100', 'resnext56_16x4d_svhn',
'resnext56_32x2d_cifar10', 'resnext56_32x2d_cifar100', 'resnext56_32x2d_svhn',
'resnext56_64x1d_cifar10', 'resnext56_64x1d_cifar100', 'resnext56_64x1d_svhn',
'resnext272_1x64d_cifar10', 'resnext272_1x64d_cifar100', 'resnext272_1x64d_svhn',
'resnext272_2x32d_cifar10', 'resnext272_2x32d_cifar100', 'resnext272_2x32d_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first
from .resnext import ResNeXtUnit
class CIFARResNeXt(tf.keras.Model):
    """
    ResNeXt model for CIFAR from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # CIFAR-sized stem: a single 3x3 conv block, no initial pooling.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ResNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 8x8 average pooling collapses the final CIFAR feature map.
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """
        Forward pass: feature extractor, flatten, linear classifier.
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_resnext_cifar(classes,
                      blocks,
                      cardinality,
                      bottleneck_width,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".tensorflow", "models"),
                      **kwargs):
    """
    ResNeXt model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Three stages of equal depth; each stage has a fixed channel width.
    assert (blocks - 2) % 9 == 0
    depth = (blocks - 2) // 9
    init_block_channels = 64
    channels = [[ci] * depth for ci in [256, 512, 1024]]
    net = CIFARResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build the model once so the weights exist before loading them.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def resnext20_1x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (1x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_cifar10",
        **kwargs)
def resnext20_1x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (1x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_cifar100",
        **kwargs)
def resnext20_1x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (1x64d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_svhn",
        **kwargs)
def resnext20_2x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (2x32d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_cifar10",
        **kwargs)
def resnext20_2x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (2x32d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_cifar100",
        **kwargs)
def resnext20_2x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (2x32d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_svhn",
        **kwargs)
def resnext20_2x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (2x64d) model for CIFAR-10 from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_cifar10",
        **kwargs)
def resnext20_2x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (2x64d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_cifar100",
        **kwargs)
def resnext20_2x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (2x64d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_svhn",
        **kwargs)
def resnext20_4x16d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (4x16d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_cifar10",
        **kwargs)
def resnext20_4x16d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (4x16d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_cifar100",
        **kwargs)
def resnext20_4x16d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (4x16d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_svhn",
        **kwargs)
def resnext20_4x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (4x32d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_cifar10",
        **kwargs)
def resnext20_4x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (4x32d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_cifar100",
        **kwargs)
def resnext20_4x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (4x32d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_svhn",
        **kwargs)
def resnext20_8x8d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (8x8d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_cifar10",
        **kwargs)
def resnext20_8x8d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (8x8d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_cifar100",
        **kwargs)
def resnext20_8x8d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (8x8d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_svhn",
        **kwargs)
def resnext20_8x16d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (8x16d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_cifar10",
        **kwargs)
def resnext20_8x16d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (8x16d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_cifar100",
        **kwargs)
def resnext20_8x16d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (8x16d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_svhn",
        **kwargs)
def resnext20_16x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (16x4d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_cifar10",
        **kwargs)
def resnext20_16x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (16x4d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_cifar100",
        **kwargs)
def resnext20_16x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (16x4d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_svhn",
        **kwargs)
def resnext20_16x8d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (16x8d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_cifar10",
        **kwargs)
def resnext20_16x8d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (16x8d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_cifar100",
        **kwargs)
def resnext20_16x8d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (16x8d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_svhn",
        **kwargs)
def resnext20_32x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (32x2d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_cifar10",
        **kwargs)
def resnext20_32x2d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (32x2d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_cifar100",
        **kwargs)
def resnext20_32x2d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (32x2d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_svhn",
        **kwargs)
def resnext20_32x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (32x4d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_cifar10",
        **kwargs)
def resnext20_32x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (32x4d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_cifar100",
        **kwargs)
def resnext20_32x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (32x4d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_svhn",
        **kwargs)
def resnext20_64x1d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (64x1d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_cifar10",
        **kwargs)
def resnext20_64x1d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (64x1d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_cifar100",
        **kwargs)
def resnext20_64x1d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (64x1d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_svhn",
        **kwargs)
def resnext20_64x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (64x2d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=2,
        model_name="resnext20_64x2d_cifar10",
        **kwargs)
def resnext20_64x2d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (64x2d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=2,
        model_name="resnext20_64x2d_cifar100",
        **kwargs)
def resnext20_64x2d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (64x2d) model for SVHN from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed: docstring previously said "(64x1d)", but this builder passes
    # cardinality=64 and bottleneck_width=2, i.e. the 64x2d configuration.
    return get_resnext_cifar(classes=classes, blocks=20, cardinality=64, bottleneck_width=2,
                             model_name="resnext20_64x2d_svhn", **kwargs)
def resnext29_32x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-29 (32x4d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_cifar10",
        **kwargs)
def resnext29_32x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-29 (32x4d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_cifar100",
        **kwargs)
def resnext29_32x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-29 (32x4d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_svhn",
        **kwargs)
def resnext29_16x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-29 (16x64d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_cifar10",
        **kwargs)
def resnext29_16x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-29 (16x64d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_cifar100",
        **kwargs)
def resnext29_16x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-29 (16x64d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_svhn",
        **kwargs)
def resnext56_1x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (1x64d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_cifar10",
        **kwargs)
def resnext56_1x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (1x64d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_cifar100",
        **kwargs)
def resnext56_1x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (1x64d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_svhn",
        **kwargs)
def resnext56_2x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (2x32d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_cifar10",
        **kwargs)
def resnext56_2x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (2x32d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_cifar100",
        **kwargs)
def resnext56_2x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (2x32d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_svhn",
        **kwargs)
def resnext56_4x16d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (4x16d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_cifar10",
        **kwargs)
def resnext56_4x16d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (4x16d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_cifar100",
        **kwargs)
def resnext56_4x16d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (4x16d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_svhn",
        **kwargs)
def resnext56_8x8d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (8x8d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_cifar10",
        **kwargs)
def resnext56_8x8d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (8x8d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_cifar100",
        **kwargs)
def resnext56_8x8d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (8x8d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_svhn",
        **kwargs)
def resnext56_16x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (16x4d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_cifar10",
        **kwargs)
def resnext56_16x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (16x4d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_cifar100",
        **kwargs)
def resnext56_16x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (16x4d) model for SVHN, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_svhn",
        **kwargs)
def resnext56_32x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (32x2d) model for CIFAR-10, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_cifar10",
        **kwargs)
def resnext56_32x2d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (32x2d) model for CIFAR-100, from 'Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    root : str, default '~/.tensorflow/models'
        Directory where the model parameters are kept.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_cifar100",
        **kwargs)
def resnext56_32x2d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (32x2d) model for SVHN.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_svhn",
        **kwargs)
def resnext56_64x1d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (64x1d) model for CIFAR-10.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_cifar10",
        **kwargs)
def resnext56_64x1d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-56 (64x1d) model for CIFAR-100.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_cifar100",
        **kwargs)
def resnext56_64x1d_svhn(classes=10, **kwargs):
    """
    ResNeXt-56 (64x1d) model for SVHN.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_svhn",
        **kwargs)
def resnext272_1x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-272 (1x64d) model for CIFAR-10.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_cifar10",
        **kwargs)
def resnext272_1x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-272 (1x64d) model for CIFAR-100.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_cifar100",
        **kwargs)
def resnext272_1x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-272 (1x64d) model for SVHN.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_svhn",
        **kwargs)
def resnext272_2x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-272 (2x32d) model for CIFAR-10.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_cifar10",
        **kwargs)
def resnext272_2x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-272 (2x32d) model for CIFAR-100.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_cifar100",
        **kwargs)
def resnext272_2x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-272 (2x32d) model for SVHN.

    From 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_svhn",
        **kwargs)
def _test():
    """
    Smoke-test every CIFAR/SVHN ResNeXt variant defined in this module.

    For each model: build it, run a forward pass on a random batch, and verify both the output
    shape and the exact trainable-parameter count. The expected count sits next to each model in
    the table below instead of the former 72-line chain of per-model asserts, so a model and its
    expectation can no longer drift apart.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Each entry: (model constructor, number of classes, expected trainable weight count).
    # CIFAR-10 and SVHN variants share a count because both have 10 classes.
    models = [
        (resnext20_1x64d_cifar10, 10, 3446602),
        (resnext20_1x64d_cifar100, 100, 3538852),
        (resnext20_1x64d_svhn, 10, 3446602),
        (resnext20_2x32d_cifar10, 10, 2672458),
        (resnext20_2x32d_cifar100, 100, 2764708),
        (resnext20_2x32d_svhn, 10, 2672458),
        (resnext20_2x64d_cifar10, 10, 6198602),
        (resnext20_2x64d_cifar100, 100, 6290852),
        (resnext20_2x64d_svhn, 10, 6198602),
        (resnext20_4x16d_cifar10, 10, 2285386),
        (resnext20_4x16d_cifar100, 100, 2377636),
        (resnext20_4x16d_svhn, 10, 2285386),
        (resnext20_4x32d_cifar10, 10, 4650314),
        (resnext20_4x32d_cifar100, 100, 4742564),
        (resnext20_4x32d_svhn, 10, 4650314),
        (resnext20_8x8d_cifar10, 10, 2091850),
        (resnext20_8x8d_cifar100, 100, 2184100),
        (resnext20_8x8d_svhn, 10, 2091850),
        (resnext20_8x16d_cifar10, 10, 3876170),
        (resnext20_8x16d_cifar100, 100, 3968420),
        (resnext20_8x16d_svhn, 10, 3876170),
        (resnext20_16x4d_cifar10, 10, 1995082),
        (resnext20_16x4d_cifar100, 100, 2087332),
        (resnext20_16x4d_svhn, 10, 1995082),
        (resnext20_16x8d_cifar10, 10, 3489098),
        (resnext20_16x8d_cifar100, 100, 3581348),
        (resnext20_16x8d_svhn, 10, 3489098),
        (resnext20_32x2d_cifar10, 10, 1946698),
        (resnext20_32x2d_cifar100, 100, 2038948),
        (resnext20_32x2d_svhn, 10, 1946698),
        (resnext20_32x4d_cifar10, 10, 3295562),
        (resnext20_32x4d_cifar100, 100, 3387812),
        (resnext20_32x4d_svhn, 10, 3295562),
        (resnext20_64x1d_cifar10, 10, 1922506),
        (resnext20_64x1d_cifar100, 100, 2014756),
        (resnext20_64x1d_svhn, 10, 1922506),
        (resnext20_64x2d_cifar10, 10, 3198794),
        (resnext20_64x2d_cifar100, 100, 3291044),
        (resnext20_64x2d_svhn, 10, 3198794),
        (resnext29_32x4d_cifar10, 10, 4775754),
        (resnext29_32x4d_cifar100, 100, 4868004),
        (resnext29_32x4d_svhn, 10, 4775754),
        (resnext29_16x64d_cifar10, 10, 68155210),
        (resnext29_16x64d_cifar100, 100, 68247460),
        (resnext29_16x64d_svhn, 10, 68155210),
        (resnext56_1x64d_cifar10, 10, 9317194),
        (resnext56_1x64d_cifar100, 100, 9409444),
        (resnext56_1x64d_svhn, 10, 9317194),
        (resnext56_2x32d_cifar10, 10, 6994762),
        (resnext56_2x32d_cifar100, 100, 7087012),
        (resnext56_2x32d_svhn, 10, 6994762),
        (resnext56_4x16d_cifar10, 10, 5833546),
        (resnext56_4x16d_cifar100, 100, 5925796),
        (resnext56_4x16d_svhn, 10, 5833546),
        (resnext56_8x8d_cifar10, 10, 5252938),
        (resnext56_8x8d_cifar100, 100, 5345188),
        (resnext56_8x8d_svhn, 10, 5252938),
        (resnext56_16x4d_cifar10, 10, 4962634),
        (resnext56_16x4d_cifar100, 100, 5054884),
        (resnext56_16x4d_svhn, 10, 4962634),
        (resnext56_32x2d_cifar10, 10, 4817482),
        (resnext56_32x2d_cifar100, 100, 4909732),
        (resnext56_32x2d_svhn, 10, 4817482),
        (resnext56_64x1d_cifar10, 10, 4744906),
        (resnext56_64x1d_cifar100, 100, 4837156),
        (resnext56_64x1d_svhn, 10, 4744906),
        (resnext272_1x64d_cifar10, 10, 44540746),
        (resnext272_1x64d_cifar100, 100, 44632996),
        (resnext272_1x64d_svhn, 10, 44540746),
        (resnext272_2x32d_cifar10, 10, 32928586),
        (resnext272_2x32d_cifar100, 100, 33020836),
        (resnext272_2x32d_svhn, 10, 32928586),
    ]

    for model, classes, expected_weight_count in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, classes))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)
# Run the module's smoke tests when it is executed directly as a script.
if __name__ == "__main__":
    _test()
| 65,482 | 38.904327 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/densenet_cifar.py | """
DenseNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_svhn',
'densenet40_k12_bc_cifar10', 'densenet40_k12_bc_cifar100', 'densenet40_k12_bc_svhn',
'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100', 'densenet40_k24_bc_svhn',
'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet40_k36_bc_svhn',
'densenet100_k12_cifar10', 'densenet100_k12_cifar100', 'densenet100_k12_svhn',
'densenet100_k24_cifar10', 'densenet100_k24_cifar100', 'densenet100_k24_svhn',
'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100', 'densenet100_k12_bc_svhn',
'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet190_k40_bc_svhn',
'densenet250_k24_bc_cifar10', 'densenet250_k24_bc_cifar100', 'densenet250_k24_bc_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3, pre_conv3x3_block, SimpleSequential, flatten, get_channel_axis, is_channels_first
from .preresnet import PreResActivation
from .densenet import DenseUnit, TransitionBlock
class DenseSimpleUnit(nn.Layer):
    """
    Basic (non-bottleneck) DenseNet unit for CIFAR: a single pre-activated 3x3 convolution whose
    output is concatenated onto the unit's input along the channel axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (input channels plus the growth increment).
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop; 0.0 disables dropout.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(DenseSimpleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_dropout = (dropout_rate != 0.0)

        # The convolution only produces the newly grown channels;
        # the input is re-attached by concatenation in call().
        grown_channels = out_channels - in_channels
        self.conv = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=grown_channels,
            data_format=data_format,
            name="conv")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")

    def call(self, x, training=None):
        skip = x
        out = self.conv(x, training=training)
        if self.use_dropout:
            out = self.dropout(out, training=training)
        return tf.concat([skip, out], axis=get_channel_axis(self.data_format))
class CIFARDenseNet(tf.keras.Model):
    """
    DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        unit_class = DenseUnit if bottleneck else DenseSimpleUnit

        self.features = SimpleSequential(name="features")
        # Plain 3x3 stem without pooling -- CIFAR inputs are only 32x32.
        self.features.add(conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(stage_idx + 1))
            if stage_idx != 0:
                # Every stage after the first starts with a compressing transition block.
                stage.add(TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2),
                    data_format=data_format,
                    name="trans{}".format(stage_idx + 1)))
                in_channels = in_channels // 2
            for unit_idx, out_channels in enumerate(stage_channels):
                stage.add(unit_class(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    data_format=data_format,
                    name="unit{}".format(unit_idx + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_densenet_cifar(classes,
                       blocks,
                       growth_rate,
                       bottleneck,
                       model_name=None,
                       pretrained=False,
                       root=os.path.join("~", ".tensorflow", "models"),
                       **kwargs):
    """
    Create DenseNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    growth_rate : int
        Growth rate.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Three equal stages; bottleneck units contain two convs each, hence the divisor of 6.
    if bottleneck:
        assert ((blocks - 4) % 6 == 0)
        layers = [(blocks - 4) // 6] * 3
    else:
        assert ((blocks - 4) % 3 == 0)
        layers = [(blocks - 4) // 3] * 3
    init_block_channels = 2 * growth_rate

    # Unroll the per-unit output widths: each stage starts from half its predecessor's final
    # width (transition compression) and grows by `growth_rate` per unit.
    channels = []
    prev_stage_last = init_block_channels * 2
    for num_units in layers:
        width = prev_stage_last // 2
        stage_channels = []
        for _ in range(num_units):
            width += growth_rate
            stage_channels.append(width)
        channels.append(stage_channels)
        prev_stage_last = stage_channels[-1]

    net = CIFARDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        classes=classes,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # The network must be built before weights can be loaded into it.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def densenet40_k12_cifar10(classes=10, **kwargs):
    """
    DenseNet-40 (k=12) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet40_k12_cifar10",
        **kwargs)
def densenet40_k12_cifar100(classes=100, **kwargs):
    """
    DenseNet-40 (k=12) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet40_k12_cifar100",
        **kwargs)
def densenet40_k12_svhn(classes=10, **kwargs):
    """
    DenseNet-40 (k=12) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet40_k12_svhn",
        **kwargs)
def densenet40_k12_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet40_k12_bc_cifar10",
        **kwargs)
def densenet40_k12_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet40_k12_bc_cifar100",
        **kwargs)
def densenet40_k12_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet40_k12_bc_svhn",
        **kwargs)
def densenet40_k24_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet40_k24_bc_cifar10",
        **kwargs)
def densenet40_k24_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet40_k24_bc_cifar100",
        **kwargs)
def densenet40_k24_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet40_k24_bc_svhn",
        **kwargs)
def densenet40_k36_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="densenet40_k36_bc_cifar10",
        **kwargs)
def densenet40_k36_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="densenet40_k36_bc_cifar100",
        **kwargs)
def densenet40_k36_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="densenet40_k36_bc_svhn",
        **kwargs)
def densenet100_k12_cifar10(classes=10, **kwargs):
    """
    DenseNet-100 (k=12) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet100_k12_cifar10",
        **kwargs)
def densenet100_k12_cifar100(classes=100, **kwargs):
    """
    DenseNet-100 (k=12) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet100_k12_cifar100",
        **kwargs)
def densenet100_k12_svhn(classes=10, **kwargs):
    """
    DenseNet-100 (k=12) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet100_k12_svhn",
        **kwargs)
def densenet100_k24_cifar10(classes=10, **kwargs):
    """
    DenseNet-100 (k=24) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=24,
        bottleneck=False,
        model_name="densenet100_k24_cifar10",
        **kwargs)
def densenet100_k24_cifar100(classes=100, **kwargs):
    """
    DenseNet-100 (k=24) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=24,
        bottleneck=False,
        model_name="densenet100_k24_cifar100",
        **kwargs)
def densenet100_k24_svhn(classes=10, **kwargs):
    """
    DenseNet-100 (k=24) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=24,
        bottleneck=False,
        model_name="densenet100_k24_svhn",
        **kwargs)
def densenet100_k12_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for CIFAR-10.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet100_k12_bc_cifar10",
        **kwargs)
def densenet100_k12_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for CIFAR-100.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet100_k12_bc_cifar100",
        **kwargs)
def densenet100_k12_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for SVHN.

    From 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=100,
        growth_rate=12,
        bottleneck=True,
        model_name="densenet100_k12_bc_svhn",
        **kwargs)
def densenet190_k40_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for CIFAR-10.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=190,
        growth_rate=40,
        bottleneck=True,
        model_name="densenet190_k40_bc_cifar10",
        **kwargs)
def densenet190_k40_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for CIFAR-100.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=190,
        growth_rate=40,
        bottleneck=True,
        model_name="densenet190_k40_bc_cifar100",
        **kwargs)
def densenet190_k40_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for SVHN.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=190,
        growth_rate=40,
        bottleneck=True,
        model_name="densenet190_k40_bc_svhn",
        **kwargs)
def densenet250_k24_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for CIFAR-10.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=250,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet250_k24_bc_cifar10",
        **kwargs)
def densenet250_k24_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for CIFAR-100.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=250,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet250_k24_bc_cifar100",
        **kwargs)
def densenet250_k24_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for SVHN.
    See 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=250,
        growth_rate=24,
        bottleneck=True,
        model_name="densenet250_k24_bc_svhn",
        **kwargs)
def _test():
    """Smoke-test each CIFAR/SVHN DenseNet variant: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # model -> (number of classes, expected trainable-parameter count)
    model_specs = {
        densenet40_k12_cifar10: (10, 599050),
        densenet40_k12_cifar100: (100, 622360),
        densenet40_k12_svhn: (10, 599050),
        densenet40_k12_bc_cifar10: (10, 176122),
        densenet40_k12_bc_cifar100: (100, 188092),
        densenet40_k12_bc_svhn: (10, 176122),
        densenet40_k24_bc_cifar10: (10, 690346),
        densenet40_k24_bc_cifar100: (100, 714196),
        densenet40_k24_bc_svhn: (10, 690346),
        densenet40_k36_bc_cifar10: (10, 1542682),
        densenet40_k36_bc_cifar100: (100, 1578412),
        densenet40_k36_bc_svhn: (10, 1542682),
        densenet100_k12_cifar10: (10, 4068490),
        densenet100_k12_cifar100: (100, 4129600),
        densenet100_k12_svhn: (10, 4068490),
        densenet100_k24_cifar10: (10, 16114138),
        densenet100_k24_cifar100: (100, 16236268),
        densenet100_k24_svhn: (10, 16114138),
        densenet100_k12_bc_cifar10: (10, 769162),
        densenet100_k12_bc_cifar100: (100, 800032),
        densenet100_k12_bc_svhn: (10, 769162),
        densenet190_k40_bc_cifar10: (10, 25624430),
        densenet190_k40_bc_cifar100: (100, 25821620),
        densenet190_k40_bc_svhn: (10, 25624430),
        densenet250_k24_bc_cifar10: (10, 15324406),
        densenet250_k24_bc_cifar100: (100, 15480556),
        densenet250_k24_bc_svhn: (10, 15324406),
    }

    batch = 14
    for model, (classes, expected_count) in model_specs.items():
        net = model(pretrained=pretrained, data_format=data_format)

        in_shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, classes))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
_test()
| 30,185 | 37.16182 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/bninception.py | """
BN-Inception for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,'
https://arxiv.org/abs/1502.03167.
"""
__all__ = ['BNInception', 'bninception']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, conv7x7_block, MaxPool2d, AvgPool2d, Concurrent, SimpleSequential,\
flatten, is_channels_first
class Inception3x3Branch(nn.Layer):
    """
    BN-Inception 3x3 branch block: a 1x1 bottleneck convolution followed by a 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 strides=1,
                 use_bias=True,
                 use_bn=True,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        # 1x1 reduction into the main 3x3 convolution.
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x, training=training), training=training)
class InceptionDouble3x3Branch(nn.Layer):
    """
    BN-Inception double 3x3 branch block: a 1x1 bottleneck followed by two 3x3 convolutions.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the last convolution.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 strides=1,
                 use_bias=True,
                 use_bn=True,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv2")
        # Only the final 3x3 convolution applies the (possibly strided) downsampling.
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class InceptionPoolBranch(nn.Layer):
    """
    BN-Inception pooling branch block: a 3x3 pooling followed by a 1x1 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    avg_pool : bool
        Whether use average pooling or max pooling.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 avg_pool,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        # Both pooling flavors share the same geometry; only the operator differs.
        pool_cls = AvgPool2d if avg_pool else MaxPool2d
        self.pool = pool_cls(
            pool_size=3,
            strides=1,
            padding=1,
            ceil_mode=True,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        return self.conv(self.pool(x), training=training)
class StemBlock(nn.Layer):
    """
    BN-Inception stem block: 7x7 conv, max pool, 3x3 branch, max pool.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv1 = conv7x7_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv1")
        self.pool1 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            ceil_mode=True,
            data_format=data_format,
            name="pool1")
        self.conv2 = Inception3x3Branch(
            in_channels=mid_channels,
            out_channels=out_channels,
            mid_channels=mid_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv2")
        self.pool2 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            ceil_mode=True,
            data_format=data_format,
            name="pool2")

    def call(self, x, training=None):
        x = self.pool1(self.conv1(x, training=training))
        x = self.pool2(self.conv2(x, training=training))
        return x
class InceptionBlock(nn.Layer):
    """
    BN-Inception unit: four parallel branches whose outputs are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid1_channels_list : list of int
        Number of pre-middle channels for branches.
    mid2_channels_list : list of int
        Number of middle channels for branches.
    avg_pool : bool
        Whether use average pooling or max pooling.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid1_channels_list,
                 mid2_channels_list,
                 avg_pool,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        for branch in (
                conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[0],
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch1"),
                Inception3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[1],
                    mid_channels=mid1_channels_list[0],
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch2"),
                InceptionDouble3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[2],
                    mid_channels=mid1_channels_list[1],
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch3"),
                InceptionPoolBranch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[3],
                    avg_pool=avg_pool,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch4")):
            self.branches.children.append(branch)

    def call(self, x, training=None):
        return self.branches(x, training=training)
class ReductionBlock(nn.Layer):
    """
    BN-Inception reduction block: three strided parallel branches whose outputs are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid1_channels_list : list of int
        Number of pre-middle channels for branches.
    mid2_channels_list : list of int
        Number of middle channels for branches.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid1_channels_list,
                 mid2_channels_list,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        for branch in (
                Inception3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[1],
                    mid_channels=mid1_channels_list[0],
                    strides=2,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch1"),
                InceptionDouble3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[2],
                    mid_channels=mid1_channels_list[1],
                    strides=2,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="branch2"),
                MaxPool2d(
                    pool_size=3,
                    strides=2,
                    padding=0,
                    ceil_mode=True,
                    data_format=data_format,
                    name="branch3")):
            self.branches.children.append(branch)

    def call(self, x, training=None):
        return self.branches(x, training=training)
class BNInception(tf.keras.Model):
    """
    BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate
    Shift,' https://arxiv.org/abs/1502.03167.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels_list : list of int
        Number of output channels for the initial unit.
    mid1_channels_list : list of list of list of int
        Number of pre-middle channels for each unit.
    mid2_channels_list : list of list of list of int
        Number of middle channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels_list,
                 mid1_channels_list,
                 mid2_channels_list,
                 use_bias=True,
                 use_bn=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(StemBlock(
            in_channels=in_channels,
            out_channels=init_block_channels_list[1],
            mid_channels=init_block_channels_list[0],
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels_list[-1]
        num_stages = len(channels)
        for i, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(stage_channels):
                unit_name = "unit{}".format(j + 1)
                if (j == 0) and (i != 0):
                    # The first unit of every non-initial stage downsamples.
                    stage.add(ReductionBlock(
                        in_channels=in_channels,
                        mid1_channels_list=mid1_channels_list[i][j],
                        mid2_channels_list=mid2_channels_list[i][j],
                        use_bias=use_bias,
                        use_bn=use_bn,
                        data_format=data_format,
                        name=unit_name))
                else:
                    # The very last unit switches its pool branch to max pooling.
                    avg_pool = (i != num_stages - 1) or (j != len(stage_channels) - 1)
                    stage.add(InceptionBlock(
                        in_channels=in_channels,
                        mid1_channels_list=mid1_channels_list[i][j],
                        mid2_channels_list=mid2_channels_list[i][j],
                        avg_pool=avg_pool,
                        use_bias=use_bias,
                        use_bn=use_bn,
                        data_format=data_format,
                        name=unit_name))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_bninception(model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create BN-Inception model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels_list = [64, 192]
    channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]]
    # Per-unit branch widths; comments give the Inception-style stage/unit labels
    # (3a..3c, 4a..4e, 5a..5b), matching between the two lists below.
    mid1_channels_list = [
        [[64, 64],  # 3a
         [64, 64]],  # 3b
        [[128, 64],  # 3c
         [64, 96],  # 4a
         [96, 96],  # 4b
         [128, 128],  # 4c
         [128, 160]],  # 4d
        [[128, 192],  # 4e
         [192, 160],  # 5a
         [192, 192]],  # 5b
    ]
    mid2_channels_list = [
        [[64, 64, 96, 32],  # 3a
         [64, 96, 96, 64]],  # 3b
        [[0, 160, 96, 0],  # 3c
         [224, 96, 128, 128],  # 4a
         [192, 128, 128, 128],  # 4b
         [160, 160, 160, 128],  # 4c
         [96, 192, 192, 128]],  # 4d
        [[0, 192, 256, 0],  # 4e
         [352, 320, 224, 128],  # 5a
         [352, 320, 224, 128]],  # 5b
    ]

    net = BNInception(
        channels=channels,
        init_block_channels_list=init_block_channels_list,
        mid1_channels_list=mid1_channels_list,
        mid2_channels_list=mid2_channels_list,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the model once so its weights exist before loading the checkpoint.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def bninception(**kwargs):
    """
    BN-Inception classification model.
    From 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,'
    https://arxiv.org/abs/1502.03167.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_bninception(model_name="bninception", **kwargs)
def _test():
    """Smoke-test BN-Inception: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # model -> expected trainable-parameter count
    model_specs = {
        bninception: 11295240,
    }

    batch = 14
    for model, expected_count in model_specs.items():
        net = model(pretrained=pretrained, data_format=data_format)

        in_shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
_test()
| 19,733 | 31.726368 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/zfnet.py | """
ZFNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
"""
__all__ = ['zfnet', 'zfnetb']
import os
import tensorflow as tf
from .alexnet import AlexNet
def get_zfnet(version="a",
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create ZFNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of ZFNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `version` is unknown, or `pretrained` is True without a valid `model_name`.
    """
    # The two versions differ only in convolution channel widths; kernel sizes,
    # strides, paddings and LRN usage are identical, so keep them in one place.
    channels_by_version = {
        "a": [[96], [256], [384, 384, 256]],
        "b": [[96], [256], [512, 1024, 512]],
    }
    if version not in channels_by_version:
        raise ValueError("Unsupported ZFNet version {}".format(version))
    channels = channels_by_version[version]
    kernel_sizes = [[7], [5], [3, 3, 3]]
    strides = [[2], [2], [1, 1, 1]]
    paddings = [[1], [0], [1, 1, 1]]
    use_lrn = True

    net = AlexNet(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build once so that weights exist before loading the checkpoint.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def zfnet(**kwargs):
    """
    ZFNet classification model.
    From 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(version="a", model_name="zfnet", **kwargs)
def zfnetb(**kwargs):
    """
    ZFNet-b classification model (wider convolution stages than ZFNet).
    From 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(version="b", model_name="zfnetb", **kwargs)
def _test():
    """Smoke-test the ZFNet variants: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # model -> expected trainable-parameter count
    model_specs = {
        zfnet: 62357608,
        zfnetb: 107627624,
    }

    batch = 14
    for model, expected_count in model_specs.items():
        net = model(pretrained=pretrained)

        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
_test()
| 3,844 | 29.275591 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/peleenet.py | """
PeleeNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882.
"""
__all__ = ['PeleeNet', 'peleenet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, Concurrent, MaxPool2d, AvgPool2d, SimpleSequential, flatten,\
is_channels_first, get_channel_axis
class PeleeBranch1(nn.Layer):
    """
    PeleeNet branch type 1 block: a 1x1 bottleneck followed by a 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 strides=1,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x, training=training), training=training)
class PeleeBranch2(nn.Layer):
    """
    PeleeNet branch type 2 block: a 1x1 bottleneck followed by two 3x3 convolutions.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class StemBlock(nn.Layer):
    """
    PeleeNet stem block: a strided 3x3 conv, two parallel downsampling branches, and a 1x1 fuse conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        branch_mid_channels = out_channels // 2
        # Concatenating the two branches doubles the channel count before fusing.
        concat_channels = out_channels * 2

        self.first_conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="first_conv")
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(PeleeBranch1(
            in_channels=out_channels,
            out_channels=out_channels,
            mid_channels=branch_mid_channels,
            strides=2,
            data_format=data_format,
            name="branch1"))
        self.branches.add(MaxPool2d(
            pool_size=2,
            strides=2,
            padding=0,
            data_format=data_format,
            name="branch2"))
        self.last_conv = conv1x1_block(
            in_channels=concat_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="last_conv")

    def call(self, x, training=None):
        x = self.first_conv(x, training=training)
        x = self.branches(x, training=training)
        return self.last_conv(x, training=training)
class DenseBlock(nn.Layer):
    """
    PeleeNet dense block: two parallel branches concatenated with the input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_size : int
        Bottleneck width.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_size,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format
        # Each branch contributes half of the channel growth.
        growth_channels = (out_channels - in_channels) // 2
        mid_channels = growth_channels * bottleneck_size

        self.branch1 = PeleeBranch1(
            in_channels=in_channels,
            out_channels=growth_channels,
            mid_channels=mid_channels,
            data_format=data_format,
            name="branch1")
        self.branch2 = PeleeBranch2(
            in_channels=in_channels,
            out_channels=growth_channels,
            mid_channels=mid_channels,
            data_format=data_format,
            name="branch2")

    def call(self, x, training=None):
        y1 = self.branch1(x, training=training)
        y2 = self.branch2(x, training=training)
        return tf.concat([x, y1, y2], axis=get_channel_axis(self.data_format))
class TransitionBlock(nn.Layer):
    """
    PeleeNet's transition block, like in DensNet, but with ordinary convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.pool = AvgPool2d(
            pool_size=2,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(self.conv(x, training=training))
class PeleeNet(tf.keras.Model):
    """
    PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
    https://arxiv.org/abs/1804.06882.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck_sizes : list of int
        Bottleneck sizes for each stage.
    dropout_rate : float, default 0.5
        Parameter of Dropout layer. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck_sizes,
                 dropout_rate=0.5,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(StemBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            if i != 0:
                # Every stage after the first begins with a downsampling transition.
                stage.add(TransitionBlock(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    data_format=data_format,
                    name="trans{}".format(i + 1)))
            for j, out_channels in enumerate(stage_channels):
                stage.add(DenseBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bottleneck_size=bottleneck_sizes[i],
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=in_channels,
            data_format=data_format,
            name="final_block"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_peleenet(model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create PeleeNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    growth_rate = 32
    layers = [3, 4, 8, 6]
    bottleneck_sizes = [1, 2, 4, 4]
    # Per-stage output channels: every unit in a stage adds `growth_rate`
    # channels on top of the running channel count.
    channels = []
    num_channels = init_block_channels
    for layers_per_stage in layers:
        stage_channels = []
        for _ in range(layers_per_stage):
            num_channels += growth_rate
            stage_channels.append(num_channels)
        channels.append(stage_channels)
    net = PeleeNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck_sizes=bottleneck_sizes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def peleenet(**kwargs):
    """
    PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
    https://arxiv.org/abs/1804.06882.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin factory: all configuration lives in get_peleenet.
    model_name = "peleenet"
    return get_peleenet(model_name=model_name, **kwargs)
def _test():
    """Smoke-test: forward pass shape and trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    for model in [peleenet]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            in_shape = (batch, 3, 224, 224)
        else:
            in_shape = (batch, 224, 224, 3)
        x = tf.random.normal(in_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != peleenet or weight_count == 2802248)
if __name__ == "__main__":
_test()
| 13,598 | 30.552204 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ibppose_coco.py | """
IBPPose for COCO Keypoint, implemented in TensorFlow.
Original paper: 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,'
https://arxiv.org/abs/1911.10529.
"""
__all__ = ['IbpPose', 'ibppose_coco']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import get_activation_layer, MaxPool2d, conv1x1_block, conv3x3_block, conv7x7_block, SEBlock, Hourglass,\
InterpolationBlock, SimpleSequential, is_channels_first, get_channel_axis
class IbpResBottleneck(nn.Layer):
    """
    Bottleneck block for residual path in the residual unit.
    Implements the classic 1x1 -> 3x3 -> 1x1 bottleneck; the final 1x1 has no
    activation so the enclosing unit can apply it after the skip addition.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 bottleneck_factor=2,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(IbpResBottleneck, self).__init__(**kwargs)
        # Internal channel reduction for the bottleneck.
        mid_channels = out_channels // bottleneck_factor
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=use_bias,
            activation=activation,
            data_format=data_format,
            name="conv1")
        # Spatial convolution carries the (optional) stride.
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            use_bias=use_bias,
            activation=activation,
            data_format=data_format,
            name="conv2")
        # No activation here: applied by the caller after the residual add.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class IbpResUnit(nn.Layer):
    """
    ResNet-like residual unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 use_bias=False,
                 bottleneck_factor=2,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(IbpResUnit, self).__init__(**kwargs)
        # A 1x1 projection on the skip branch is needed whenever the shape
        # (channels or spatial size) changes across the unit.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = IbpResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=use_bias,
            bottleneck_factor=bottleneck_factor,
            activation=activation,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        # Post-addition activation.
        self.activ = get_activation_layer(activation)
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class IbpBackbone(nn.Layer):
    """
    IBPPose backbone.
    Downsamples the input by 4x (stride-2 stem conv + 2x2 max-pool), then
    concatenates plain features with a stack of dilated-convolution features.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activation : function or str or None
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(IbpBackbone, self).__init__(**kwargs)
        self.data_format = data_format
        # Dilation rates for the sequential context branch.
        dilations = (3, 3, 4, 4, 5, 5)
        mid1_channels = out_channels // 4
        mid2_channels = out_channels // 2
        self.conv1 = conv7x7_block(
            in_channels=in_channels,
            out_channels=mid1_channels,
            strides=2,
            activation=activation,
            data_format=data_format,
            name="conv1")
        self.res1 = IbpResUnit(
            in_channels=mid1_channels,
            out_channels=mid2_channels,
            activation=activation,
            data_format=data_format,
            name="res1")
        self.pool = MaxPool2d(
            pool_size=2,
            strides=2,
            data_format=data_format,
            name="pool")
        self.res2 = IbpResUnit(
            in_channels=mid2_channels,
            out_channels=mid2_channels,
            activation=activation,
            data_format=data_format,
            name="res2")
        self.dilation_branch = SimpleSequential(name="dilation_branch")
        for i, dilation in enumerate(dilations):
            self.dilation_branch.add(conv3x3_block(
                in_channels=mid2_channels,
                out_channels=mid2_channels,
                padding=dilation,
                dilation=dilation,
                activation=activation,
                data_format=data_format,
                name="block{}".format(i + 1)))
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.res1(x, training=training)
        x = self.pool(x, training=training)
        x = self.res2(x, training=training)
        y = self.dilation_branch(x, training=training)
        # Concatenation yields mid2 + mid2 = out_channels channels.
        x = tf.concat([x, y], axis=get_channel_axis(self.data_format))
        return x
class IbpDownBlock(nn.Layer):
    """
    IBPPose down block for the hourglass.
    2x max-pool downsampling followed by a residual unit that may change the
    channel count.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activation : function or str or None
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(IbpDownBlock, self).__init__(**kwargs)
        self.down = MaxPool2d(
            pool_size=2,
            strides=2,
            data_format=data_format,
            name="down")
        self.res = IbpResUnit(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=activation,
            data_format=data_format,
            name="res")
    def call(self, x, training=None):
        x = self.down(x, training=training)
        x = self.res(x, training=training)
        return x
class IbpUpBlock(nn.Layer):
    """
    IBPPose up block for the hourglass.
    Residual unit, 2x nearest-neighbor upsampling, then a 3x3 smoothing conv.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bn,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(IbpUpBlock, self).__init__(**kwargs)
        self.res = IbpResUnit(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=activation,
            data_format=data_format,
            name="res")
        self.up = InterpolationBlock(
            scale_factor=2,
            interpolation="nearest",
            data_format=data_format,
            name="up")
        # Bias is enabled exactly when BatchNorm is disabled.
        self.conv = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=(not use_bn),
            use_bn=use_bn,
            activation=activation,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        x = self.res(x, training=training)
        x = self.up(x, training=training)
        x = self.conv(x, training=training)
        return x
class MergeBlock(nn.Layer):
    """
    IBPPose merge block.
    A single non-activated 1x1 convolution used to project features before
    they are summed across decoder passes.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bn : bool
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super(MergeBlock, self).__init__(**kwargs)
        # Bias is enabled exactly when BatchNorm is disabled.
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=(not use_bn),
            use_bn=use_bn,
            activation=None,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        return self.conv(x, training=training)
class IbpPreBlock(nn.Layer):
    """
    IBPPose preliminary decoder block.
    Two 3x3 convolutions followed by channel-wise squeeze-and-excitation;
    the channel count is preserved throughout.
    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 out_channels,
                 use_bn,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(IbpPreBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=(not use_bn),
            use_bn=use_bn,
            activation=activation,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=(not use_bn),
            use_bn=use_bn,
            activation=activation,
            data_format=data_format,
            name="conv2")
        # Channel attention (fully-connected variant of SE).
        self.se = SEBlock(
            channels=out_channels,
            use_conv=False,
            mid_activation=activation,
            data_format=data_format,
            name="se")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.se(x, training=training)
        return x
class IbpPass(nn.Layer):
    """
    IBPPose single pass decoder block.
    One hourglass (with per-level skip units), a preliminary refinement
    block, and a 1x1 head producing the pass output. When `merge` is set,
    additional projections prepare the output for fusion with the next pass.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    mid_channels : int
        Number of middle channels.
    depth : int
        Depth of hourglass.
    growth_rate : int
        Addition for number of channel for each level.
    merge : bool
        Whether to produce merge projections for fusion with the next pass.
    use_bn : bool
        Whether to use BatchNorm layer.
    activation : function or str or None
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 mid_channels,
                 depth,
                 growth_rate,
                 merge,
                 use_bn,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(IbpPass, self).__init__(**kwargs)
        self.merge = merge
        down_seq = SimpleSequential(name="down_seq")
        up_seq = SimpleSequential(name="up_seq")
        skip_seq = SimpleSequential(name="skip_seq")
        top_channels = channels
        bottom_channels = channels
        # Build depth+1 skip units and depth down/up pairs; the channel count
        # grows by `growth_rate` at each deeper hourglass level.
        for i in range(depth + 1):
            skip_seq.add(IbpResUnit(
                in_channels=top_channels,
                out_channels=top_channels,
                activation=activation,
                data_format=data_format,
                name="skip{}".format(i + 1)))
            bottom_channels += growth_rate
            if i < depth:
                down_seq.add(IbpDownBlock(
                    in_channels=top_channels,
                    out_channels=bottom_channels,
                    activation=activation,
                    data_format=data_format,
                    name="down{}".format(i + 1)))
                up_seq.add(IbpUpBlock(
                    in_channels=bottom_channels,
                    out_channels=top_channels,
                    use_bn=use_bn,
                    activation=activation,
                    data_format=data_format,
                    name="up{}".format(i + 1)))
            top_channels = bottom_channels
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq,
            name="hg")
        self.pre_block = IbpPreBlock(
            out_channels=channels,
            use_bn=use_bn,
            activation=activation,
            data_format=data_format,
            name="pre_block")
        # Head: projects to the network output channels (keypoint maps).
        self.post_block = conv1x1_block(
            in_channels=channels,
            out_channels=mid_channels,
            use_bias=True,
            use_bn=False,
            activation=None,
            data_format=data_format,
            name="post_block")
        if self.merge:
            self.pre_merge_block = MergeBlock(
                in_channels=channels,
                out_channels=channels,
                use_bn=use_bn,
                data_format=data_format,
                name="pre_merge_block")
            self.post_merge_block = MergeBlock(
                in_channels=mid_channels,
                out_channels=channels,
                use_bn=use_bn,
                data_format=data_format,
                name="post_merge_block")
    def call(self, x, x_prev, training=None):
        x = self.hg(x, training=training)
        if x_prev is not None:
            x = x + x_prev
        y = self.pre_block(x, training=training)
        z = self.post_block(y, training=training)
        if self.merge:
            # Fuse head output and refined features into the next pass input.
            z = self.post_merge_block(z, training=training) + self.pre_merge_block(y, training=training)
        return z
class IbpPose(tf.keras.Model):
    """
    IBPPose model from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,'
    https://arxiv.org/abs/1911.10529.
    Parameters:
    ----------
    passes : int
        Number of passes.
    backbone_out_channels : int
        Number of output channels for the backbone.
    outs_channels : int
        Number of output channels of each decoder pass (network outputs).
    depth : int
        Depth of hourglass.
    growth_rate : int
        Addition for number of channel for each level.
    use_bn : bool
        Whether to use BatchNorm layer.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 256)
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 passes,
                 backbone_out_channels,
                 outs_channels,
                 depth,
                 growth_rate,
                 use_bn,
                 in_channels=3,
                 in_size=(256, 256),
                 data_format="channels_last",
                 **kwargs):
        super(IbpPose, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        activation = nn.LeakyReLU(alpha=0.01)
        self.backbone = IbpBackbone(
            in_channels=in_channels,
            out_channels=backbone_out_channels,
            activation=activation,
            data_format=data_format,
            name="backbone")
        # Stacked hourglass passes; only the last one skips the merge step
        # (there is no subsequent pass to feed).
        self.decoder = SimpleSequential(name="decoder")
        for i in range(passes):
            merge = (i != passes - 1)
            self.decoder.add(IbpPass(
                channels=backbone_out_channels,
                mid_channels=outs_channels,
                depth=depth,
                growth_rate=growth_rate,
                merge=merge,
                use_bn=use_bn,
                activation=activation,
                data_format=data_format,
                name="pass{}".format(i + 1)))
    def call(self, x, training=None):
        x = self.backbone(x, training=training)
        x_prev = None
        # Chain the passes, feeding each pass the previous pass output; the
        # final (non-merge) pass output is the network result.
        for block in self.decoder.children:
            if x_prev is not None:
                x = x + x_prev
            x_prev = block(x, x_prev, training=training)
        return x_prev
def get_ibppose(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create IBPPose model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture hyper-parameters of the published model.
    net = IbpPose(
        passes=4,
        backbone_out_channels=256,
        outs_channels=50,
        depth=4,
        growth_rate=128,
        use_bn=True,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def ibppose_coco(data_format="channels_last", **kwargs):
    """
    IBPPose model for COCO Keypoint from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person
    Pose Estimation,' https://arxiv.org/abs/1911.10529.
    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin factory: all configuration lives in get_ibppose.
    model_name = "ibppose_coco"
    return get_ibppose(model_name=model_name, data_format=data_format, **kwargs)
def _test():
    """Smoke-test: output channel/spatial shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    # os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
    # os.environ["TF_DETERMINISTIC_OPS"] = "1"
    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (256, 256)
    pretrained = False
    for model in [ibppose_coco]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        y = net(x)
        assert (y.shape[0] == batch)
        # Output has 50 channels and 1/4 of the input spatial resolution.
        ch_axis = 1 if is_channels_first(data_format) else 3
        spatial_axes = (2, 3) if is_channels_first(data_format) else (1, 2)
        assert (y.shape[ch_axis] == 50)
        for axis in spatial_axes:
            assert (y.shape[axis] == x.shape[axis] // 4)
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != ibppose_coco or weight_count == 95827784)
if __name__ == "__main__":
_test()
| 22,000 | 31.402062 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/xception.py | """
Xception for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357.
"""
__all__ = ['Xception', 'xception']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, BatchNorm, MaxPool2d, AvgPool2d, conv1x1_block, conv3x3_block, flatten,\
SimpleSequential, is_channels_first
class DwsConv(nn.Layer):
    """
    Depthwise separable convolution layer.
    A grouped (per-channel) spatial convolution followed by a 1x1 pointwise
    convolution; both are bias-free.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 data_format="channels_last",
                 **kwargs):
        super(DwsConv, self).__init__(**kwargs)
        # groups=in_channels makes this a depthwise convolution.
        self.dw_conv = Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=in_channels,
            use_bias=False,
            data_format=data_format,
            name="dw_conv")
        # 1x1 pointwise convolution mixes channels.
        self.pw_conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_bias=False,
            data_format=data_format,
            name="pw_conv")
    def call(self, x, training=None):
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
class DwsConvBlock(nn.Layer):
    """
    Depthwise separable convolution block with batchnorm and ReLU pre-activation.
    Note the pre-activation ordering: optional ReLU first, then the
    depthwise-separable convolution, then BatchNorm.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 activate,
                 data_format="channels_last",
                 **kwargs):
        super(DwsConvBlock, self).__init__(**kwargs)
        self.activate = activate
        if self.activate:
            self.activ = nn.ReLU()
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
    def call(self, x, training=None):
        # Pre-activation: ReLU is applied to the block input, not the output.
        if self.activate:
            x = self.activ(x)
        x = self.conv(x)
        x = self.bn(x, training=training)
        return x
def dws_conv3x3_block(in_channels,
                      out_channels,
                      activate,
                      data_format="channels_last",
                      **kwargs):
    """
    3x3 version of the depthwise separable convolution block.
    Fixes kernel_size=3, strides=1, padding=1 (i.e. a shape-preserving
    depthwise-separable convolution).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsConvBlock(
        kernel_size=3,
        strides=1,
        padding=1,
        in_channels=in_channels,
        out_channels=out_channels,
        activate=activate,
        data_format=data_format,
        **kwargs)
class XceptionUnit(nn.Layer):
    """
    Xception unit.
    A stack of depthwise-separable 3x3 blocks (optionally ending with a
    strided max-pool) plus an identity/1x1-projection residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the downsample polling.
    reps : int
        Number of repetitions.
    start_with_relu : bool, default True
        Whether start with ReLU activation.
    grow_first : bool, default True
        Whether start from growing.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 reps,
                 start_with_relu=True,
                 grow_first=True,
                 data_format="channels_last",
                 **kwargs):
        super(XceptionUnit, self).__init__(**kwargs)
        # A 1x1 projection on the skip branch is needed whenever the shape
        # (channels or spatial size) changes across the unit.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.body = SimpleSequential(name="body")
        for i in range(reps):
            # Channel growth happens at the first block (grow_first) or at
            # the last block (otherwise); all other blocks keep the width.
            if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)):
                in_channels_i = in_channels
                out_channels_i = out_channels
            else:
                if grow_first:
                    in_channels_i = out_channels
                    out_channels_i = out_channels
                else:
                    in_channels_i = in_channels
                    out_channels_i = in_channels
            activate = start_with_relu if (i == 0) else True
            # Use the SimpleSequential.add API (consistent with the rest of
            # the codebase) rather than appending to .children directly.
            self.body.add(dws_conv3x3_block(
                in_channels=in_channels_i,
                out_channels=out_channels_i,
                activate=activate,
                data_format=data_format,
                name="block{}".format(i + 1)))
        if strides != 1:
            self.body.add(MaxPool2d(
                pool_size=3,
                strides=strides,
                padding=1,
                data_format=data_format,
                name="pool"))
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            # Plain assignment; the redundant tf.identity op was removed.
            identity = x
        x = self.body(x, training=training)
        x = x + identity
        return x
class XceptionInitBlock(nn.Layer):
    """
    Xception specific initial block.
    Two unpadded 3x3 convolutions; the first one halves the spatial size.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(XceptionInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            strides=1,
            padding=0,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class XceptionFinalBlock(nn.Layer):
    """
    Xception specific final block.
    Two depthwise-separable blocks (1024 -> 1536 -> 2048 channels), a final
    ReLU, and global average pooling.
    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(XceptionFinalBlock, self).__init__(**kwargs)
        self.conv1 = dws_conv3x3_block(
            in_channels=1024,
            out_channels=1536,
            activate=False,
            data_format=data_format,
            name="conv1")
        self.conv2 = dws_conv3x3_block(
            in_channels=1536,
            out_channels=2048,
            activate=True,
            data_format=data_format,
            name="conv2")
        self.activ = nn.ReLU()
        # pool_size=10 matches the 10x10 feature map produced from the
        # default 299x299 input by the preceding strides.
        self.pool = AvgPool2d(
            pool_size=10,
            strides=1,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.activ(x)
        x = self.pool(x)
        return x
class Xception(tf.keras.Model):
    """
    Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,'
    https://arxiv.org/abs/1610.02357.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(Xception, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(XceptionInitBlock(
            in_channels=in_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = 64
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of each stage downsamples; the very
                # first unit overall skips the leading ReLU, and the very
                # last one grows channels at the end rather than the start.
                stage.add(XceptionUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=(2 if (j == 0) else 1),
                    reps=(2 if (j == 0) else 3),
                    start_with_relu=((i != 0) or (j != 0)),
                    grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1)),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(XceptionFinalBlock(
            data_format=data_format,
            name="final_block"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=2048,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_xception(model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create Xception model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Entry flow (128, 256), middle flow (728 x 9), exit flow (1024).
    channels = [[128], [256], [728] * 9, [1024]]
    net = Xception(
        channels=channels,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def xception(**kwargs):
    """
    Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,'
    https://arxiv.org/abs/1610.02357.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin factory: all configuration lives in get_xception.
    model_name = "xception"
    return get_xception(model_name=model_name, **kwargs)
def _test():
    """Smoke-test: forward pass shape and trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    for model in [xception]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            in_shape = (batch, 3, 299, 299)
        else:
            in_shape = (batch, 299, 299, 3)
        x = tf.random.normal(in_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != xception or weight_count == 22855952)
if __name__ == "__main__":
_test()
| 14,191 | 30.191209 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/darknet53.py | """
DarkNet-53 for ImageNet-1K, implemented in TensorFlow.
Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
"""
__all__ = ['DarkNet53', 'darknet53']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten
class DarkUnit(nn.Layer):
    """
    DarkNet unit.
    Residual unit with a 1x1 channel-halving convolution followed by a 3x3
    channel-restoring convolution; input and output shapes are identical.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 alpha,
                 data_format="channels_last",
                 **kwargs):
        super(DarkUnit, self).__init__(**kwargs)
        # The 1x1 conv halves the channels, so out_channels must be even.
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        identity = x
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        # Residual connection (no projection: shapes are unchanged).
        return x + identity
class DarkNet53(tf.keras.Model):
    """
    DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet53, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                if j == 0:
                    # First unit of each stage: strided conv downsampling.
                    stage.add(conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=2,
                        activation=nn.LeakyReLU(alpha=alpha),
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    # Remaining units are shape-preserving residual units.
                    stage.add(DarkUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        alpha=alpha,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_darknet53(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create DarkNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    # Five stages; each stage repeats the same output width.
    layers = [2, 3, 9, 9, 5]
    channels_per_layers = [64, 128, 256, 512, 1024]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = DarkNet53(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 input shape so variables exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def darknet53(**kwargs):
    """
    DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,'
    https://arxiv.org/abs/1804.02767.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_darknet53(model_name="darknet53", **kwargs)
    return net
def _test():
    """Smoke test: build the model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    models = [
        darknet53,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference trainable-parameter count for DarkNet-53.
        assert (model != darknet53 or weight_count == 41609928)


if __name__ == "__main__":
    _test()
| 7,225 | 31.54955 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mobilenet.py | """
MobileNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'get_mobilenet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, dwsconv3x3_block, SimpleSequential, flatten
class MobileNet(tf.keras.Model):
    """
    MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit. The first (single-element)
        stage defines the initial block width.
    first_stage_stride : bool
        Whether stride is used at the first stage.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 first_stage_stride,
                 dw_use_bn=True,
                 dw_activation="relu",
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        init_block_channels = channels[0][0]
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # channels[0] is consumed by the init block; stages start at index 1.
        for i, channels_per_stage in enumerate(channels[1:]):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample on the first unit of every stage except,
                # optionally, the very first stage.
                strides = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1
                stage.add(dwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dw_use_bn=dw_use_bn,
                    dw_activation=dw_activation,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 average pooling assumes a 224x224 input reduced by 32x.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Run the backbone, flatten the pooled features and classify."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_mobilenet(width_scale,
                  dws_simplified=False,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create MobileNet model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    dws_simplified : bool, default False
        Whether to use simplified depthwise separable convolution block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]]
    first_stage_stride = False

    if width_scale != 1.0:
        # Scaled widths are truncated towards zero (int()), matching the
        # reference implementation.
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]

    if dws_simplified:
        dw_use_bn = False
        dw_activation = None
    else:
        dw_use_bn = True
        dw_activation = "relu"

    net = MobileNet(
        channels=channels,
        first_stage_stride=first_stage_stride,
        dw_use_bn=dw_use_bn,
        dw_activation=dw_activation,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 input shape so variables exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def mobilenet_w1(**kwargs):
    """
    1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenet(width_scale=1.0, model_name="mobilenet_w1", **kwargs)
    return net


def mobilenet_w3d4(**kwargs):
    """
    0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenet(width_scale=0.75, model_name="mobilenet_w3d4", **kwargs)
    return net


def mobilenet_wd2(**kwargs):
    """
    0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenet(width_scale=0.5, model_name="mobilenet_wd2", **kwargs)
    return net


def mobilenet_wd4(**kwargs):
    """
    0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
    Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenet(width_scale=0.25, model_name="mobilenet_wd4", **kwargs)
    return net
def _test():
    """Smoke test: build each width variant, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    models = [
        mobilenet_w1,
        mobilenet_w3d4,
        mobilenet_wd2,
        mobilenet_wd4,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference trainable-parameter counts per width multiplier.
        assert (model != mobilenet_w1 or weight_count == 4231976)
        assert (model != mobilenet_w3d4 or weight_count == 2585560)
        assert (model != mobilenet_wd2 or weight_count == 1331592)
        assert (model != mobilenet_wd4 or weight_count == 470072)


if __name__ == "__main__":
    _test()
| 8,450 | 33.493878 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/dpn.py | """
DPN for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
"""
__all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn98', 'dpn107', 'dpn131']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, GlobalAvgPool2d, BatchNorm, Conv2d, conv1x1, DualPathSequential, SimpleSequential,\
flatten, is_channels_first, get_channel_axis
class GlobalAvgMaxPool2D(nn.Layer):
    """
    Global pooling that averages the results of global average pooling and
    global max pooling, then restores two singleton axes.

    NOTE(review): both singleton axes are inserted at the channel axis, so for
    'channels_last' the output is (batch, channels, 1, 1); harmless here since
    the result is flattened downstream — confirm if reused elsewhere.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(GlobalAvgMaxPool2D, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        self.avg_pool = nn.GlobalAvgPool2D(
            data_format=data_format,
            name="avg_pool")
        self.max_pool = nn.GlobalMaxPool2D(
            data_format=data_format,
            name="max_pool")

    def call(self, x, training=None):
        pooled = 0.5 * (self.avg_pool(x) + self.max_pool(x))
        pooled = tf.expand_dims(tf.expand_dims(pooled, axis=self.axis), axis=self.axis)
        return pooled
def dpn_batch_norm(channels,
                   data_format="channels_last",
                   **kwargs):
    """
    DPN-specific batch normalization layer with epsilon fixed at 0.001.

    Parameters:
    ----------
    channels : int
        Number of channels in input data. Only validated (must not be None);
        the layer itself infers its size on build.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    assert (channels is not None)
    return BatchNorm(
        epsilon=0.001,
        data_format=data_format,
        **kwargs)
class PreActivation(nn.Layer):
    """
    DPN-specific pre-activation block (BatchNorm followed by ReLU), in the
    style of PreResNet.

    Parameters:
    ----------
    channels : int
        Number of channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        self.bn = dpn_batch_norm(
            channels=channels,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        return self.activ(self.bn(x, training=training))
class DPNConv(nn.Layer):
    """
    DPN-specific pre-activated convolution block: BatchNorm -> ReLU -> Conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups,
                 data_format="channels_last",
                 **kwargs):
        super(DPNConv, self).__init__(**kwargs)
        self.bn = dpn_batch_norm(
            channels=in_channels,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=groups,
            use_bias=False,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        # Pre-activation: normalize and rectify before convolving.
        y = self.activ(self.bn(x, training=training))
        return self.conv(y)
def dpn_conv1x1(in_channels,
                out_channels,
                strides=1,
                data_format="channels_last",
                **kwargs):
    """
    1x1 variant of the DPN pre-activated convolution block (no padding,
    single group).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DPNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        groups=1,
        data_format=data_format,
        **kwargs)


def dpn_conv3x3(in_channels,
                out_channels,
                strides,
                groups,
                data_format="channels_last",
                **kwargs):
    """
    3x3 variant of the DPN pre-activated convolution block (padding 1,
    grouped).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DPNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=groups,
        data_format=data_format,
        **kwargs)
class DPNUnit(nn.Layer):
    """
    DPN unit. Maintains two parallel paths: a residual path of fixed width
    `bw` and a densely-growing path that gains `inc` channels per unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of intermediate channels.
    bw : int
        Number of residual channels.
    inc : int
        Incrementing step for channels.
    groups : int
        Number of groups in the units.
    has_proj : bool
        Whether to use projection.
    key_strides : int
        Key strides of the convolutions.
    b_case : bool, default False
        Whether to use B-case model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 bw,
                 inc,
                 groups,
                 has_proj,
                 key_strides,
                 b_case=False,
                 data_format="channels_last",
                 **kwargs):
        super(DPNUnit, self).__init__(**kwargs)
        self.bw = bw
        self.has_proj = has_proj
        self.b_case = b_case
        self.data_format = data_format

        if self.has_proj:
            # Projection produces bw residual channels plus 2*inc dense ones.
            self.conv_proj = dpn_conv1x1(
                in_channels=in_channels,
                out_channels=bw + 2 * inc,
                strides=key_strides,
                data_format=data_format,
                name="conv_proj")

        self.conv1 = dpn_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = dpn_conv3x3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=key_strides,
            groups=groups,
            data_format=data_format,
            name="conv2")

        if b_case:
            # B-case: explicit pre-activation, then two separate 1x1 heads
            # for the residual (bw) and dense (inc) outputs.
            self.preactiv = PreActivation(
                channels=mid_channels,
                data_format=data_format,
                name="preactiv")
            self.conv3a = conv1x1(
                in_channels=mid_channels,
                out_channels=bw,
                data_format=data_format,
                name="conv3a")
            self.conv3b = conv1x1(
                in_channels=mid_channels,
                out_channels=inc,
                data_format=data_format,
                name="conv3b")
        else:
            # Standard case: one 1x1 head, split into bw + inc afterwards.
            self.conv3 = dpn_conv1x1(
                in_channels=mid_channels,
                out_channels=bw + inc,
                data_format=data_format,
                name="conv3")

    def call(self, x1, x2=None, training=None):
        """Return (residual, dense) pair from the (residual, dense) inputs."""
        axis = get_channel_axis(self.data_format)
        # Both paths are concatenated before being fed to the convolutions.
        x_in = tf.concat([x1, x2], axis=axis) if x2 is not None else x1
        if self.has_proj:
            x_s = self.conv_proj(x_in, training=training)
            channels = (x_s.get_shape().as_list())[axis]
            # First bw channels form the shortcut residual; the rest seed the
            # dense path.
            x_s1, x_s2 = tf.split(x_s, num_or_size_splits=[self.bw, channels - self.bw], axis=axis)
        else:
            assert (x2 is not None)
            x_s1 = x1
            x_s2 = x2
        x_in = self.conv1(x_in, training=training)
        x_in = self.conv2(x_in, training=training)
        if self.b_case:
            x_in = self.preactiv(x_in, training=training)
            y1 = self.conv3a(x_in, training=training)
            y2 = self.conv3b(x_in, training=training)
        else:
            x_in = self.conv3(x_in, training=training)
            # Split the combined output into residual (bw) and dense parts.
            channels = (x_in.get_shape().as_list())[axis]
            y1, y2 = tf.split(x_in, num_or_size_splits=[self.bw, channels - self.bw], axis=axis)
        residual = x_s1 + y1
        dense = tf.concat([x_s2, y2], axis=axis)
        return residual, dense
class DPNInitBlock(nn.Layer):
    """
    DPN-specific initial block: strided convolution, BatchNorm, ReLU and a
    3x3/2 max pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding,
                 data_format="channels_last",
                 **kwargs):
        super(DPNInitBlock, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=2,
            padding=padding,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = dpn_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        y = self.bn(self.conv(x), training=training)
        return self.pool(self.activ(y))
class DPNFinalBlock(nn.Layer):
    """
    DPN final block: concatenates the residual and dense paths, then applies
    the pre-activation (BatchNorm + ReLU).

    Parameters:
    ----------
    channels : int
        Number of channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(DPNFinalBlock, self).__init__(**kwargs)
        self.data_format = data_format
        self.activ = PreActivation(
            channels=channels,
            data_format=data_format,
            name="activ")

    def call(self, x1, x2, training=None):
        """Return (activated concat, None) — the dense path ends here."""
        assert (x2 is not None)
        x = tf.concat([x1, x2], axis=get_channel_axis(self.data_format))
        # Forward `training` explicitly so the inner BatchNorm runs in the
        # correct mode (consistent with the rest of the file).
        x = self.activ(x, training=training)
        return x, None
class DPN(tf.keras.Model):
    """
    DPN model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    init_block_kernel_size : int or tuple/list of 2 int
        Convolution window size for the initial unit.
    init_block_padding : int or tuple/list of 2 int
        Padding value for convolution layer in the initial unit.
    rs : list of int
        Number of intermediate channels for each unit.
    bws : list of int
        Number of residual channels for each unit.
    incs : list of int
        Incrementing step for channels for each unit.
    groups : int
        Number of groups in the units.
    b_case : bool
        Whether to use B-case model.
    for_training : bool
        Whether to use model for training.
    test_time_pool : bool
        Whether to use the avg-max pooling in the inference mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 init_block_kernel_size,
                 init_block_padding,
                 rs,
                 bws,
                 incs,
                 groups,
                 b_case,
                 for_training,
                 test_time_pool,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DPN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # DualPathSequential threads the (residual, dense) tensor pair
        # through the units; ordinals pass a single tensor at the ends.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=0,
            name="features")
        self.features.children.append(DPNInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            kernel_size=init_block_kernel_size,
            padding=init_block_padding,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = DualPathSequential(name="stage{}".format(i + 1))
            r = rs[i]
            bw = bws[i]
            inc = incs[i]
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of a stage projects the identity;
                # stages after the first downsample there too.
                has_proj = (j == 0)
                key_strides = 2 if (j == 0) and (i != 0) else 1
                stage.children.append(DPNUnit(
                    in_channels=in_channels,
                    mid_channels=r,
                    bw=bw,
                    inc=inc,
                    groups=groups,
                    has_proj=has_proj,
                    key_strides=key_strides,
                    b_case=b_case,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.children.append(stage)
        self.features.children.append(DPNFinalBlock(
            channels=in_channels,
            data_format=data_format,
            name="final_block"))

        self.output1 = SimpleSequential(name="output1")
        if for_training or not test_time_pool:
            # Training / plain inference head: global average pool + 1x1 conv.
            self.output1.add(GlobalAvgPool2d(
                data_format=data_format,
                name="final_pool"))
            self.output1.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True,
                data_format=data_format,
                name="classifier"))
        else:
            # Test-time head: classify on a 7x7 map, then avg+max pool.
            self.output1.add(nn.AveragePooling2D(
                pool_size=7,
                strides=1,
                data_format=data_format,
                name="avg_pool"))
            self.output1.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True,
                data_format=data_format,
                name="classifier"))
            self.output1.add(GlobalAvgMaxPool2D(
                data_format=data_format,
                name="avgmax_pool"))

    def call(self, x, training=None):
        """Run the dual-path backbone, classify, and flatten the logits."""
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_dpn(num_layers,
            b_case=False,
            for_training=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create DPN model with specific parameters.

    Parameters:
    ----------
    num_layers : int
        Number of layers. One of 68, 98, 107 or 131.
    b_case : bool, default False
        Whether to use B-case model.
    for_training : bool, default False
        Whether to use model for training.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if num_layers == 68:
        init_block_channels = 10
        init_block_kernel_size = 3
        init_block_padding = 1
        bw_factor = 1
        k_r = 128
        groups = 32
        k_sec = (3, 4, 12, 3)
        incs = (16, 32, 32, 64)
        test_time_pool = True
    elif num_layers == 98:
        init_block_channels = 96
        init_block_kernel_size = 7
        init_block_padding = 3
        bw_factor = 4
        k_r = 160
        groups = 40
        k_sec = (3, 6, 20, 3)
        incs = (16, 32, 32, 128)
        test_time_pool = True
    elif num_layers == 107:
        init_block_channels = 128
        init_block_kernel_size = 7
        init_block_padding = 3
        bw_factor = 4
        k_r = 200
        groups = 50
        k_sec = (4, 8, 20, 3)
        incs = (20, 64, 64, 128)
        test_time_pool = True
    elif num_layers == 131:
        init_block_channels = 128
        init_block_kernel_size = 7
        init_block_padding = 3
        bw_factor = 4
        k_r = 160
        groups = 40
        k_sec = (4, 8, 28, 3)
        incs = (16, 32, 32, 128)
        test_time_pool = True
    else:
        raise ValueError("Unsupported DPN version with number of layers {}".format(num_layers))

    # Intermediate (rs) and residual (bws) widths double with each stage.
    num_stages = len(k_sec)
    rs = [(2 ** i) * k_r for i in range(num_stages)]
    bws = [(2 ** i) * 64 * bw_factor for i in range(num_stages)]
    # The first unit of stage i outputs bws[i] + 3 * incs[i] channels; every
    # following unit grows the dense path by another incs[i] channels.
    channels = [[bws[i] + (3 + j) * incs[i] for j in range(k_sec[i])]
                for i in range(num_stages)]

    net = DPN(
        channels=channels,
        init_block_channels=init_block_channels,
        init_block_kernel_size=init_block_kernel_size,
        init_block_padding=init_block_padding,
        rs=rs,
        bws=bws,
        incs=incs,
        groups=groups,
        b_case=b_case,
        for_training=for_training,
        test_time_pool=test_time_pool,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 input shape so variables exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def dpn68(**kwargs):
    """
    DPN-68 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_dpn(num_layers=68, b_case=False, model_name="dpn68", **kwargs)
    return net


def dpn68b(**kwargs):
    """
    DPN-68b model (B-case) from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_dpn(num_layers=68, b_case=True, model_name="dpn68b", **kwargs)
    return net


def dpn98(**kwargs):
    """
    DPN-98 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_dpn(num_layers=98, b_case=False, model_name="dpn98", **kwargs)
    return net


def dpn107(**kwargs):
    """
    DPN-107 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_dpn(num_layers=107, b_case=False, model_name="dpn107", **kwargs)
    return net


def dpn131(**kwargs):
    """
    DPN-131 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_dpn(num_layers=131, b_case=False, model_name="dpn131", **kwargs)
    return net
def _test():
    """Smoke test: build each DPN variant, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    models = [
        dpn68,
        dpn68b,
        dpn98,
        dpn107,
        dpn131,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference trainable-parameter counts per variant.
        assert (model != dpn68 or weight_count == 12611602)
        assert (model != dpn68b or weight_count == 12611602)
        assert (model != dpn98 or weight_count == 61570728)
        assert (model != dpn107 or weight_count == 86917800)
        assert (model != dpn131 or weight_count == 79254504)


if __name__ == "__main__":
    _test()
| 23,478 | 30.056878 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/sknet.py | """
SKNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
"""
__all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, SimpleSequential, flatten, is_channels_first,\
get_channel_axis
from .resnet import ResInitBlock
class SKConvBlock(nn.Layer):
    """
    SKNet specific convolution block: multi-branch dilated convolutions whose
    outputs are fused by learned per-branch soft attention.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int, default 32
        Number of groups in branches.
    num_branches : int, default 2
        Number of branches (`M` parameter in the paper).
    reduction : int, default 16
        Reduction value for intermediate channels (`r` parameter in the paper).
    min_channels : int, default 32
        Minimal number of intermediate channels (`L` parameter in the paper).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups=32,
                 num_branches=2,
                 reduction=16,
                 min_channels=32,
                 data_format="channels_last",
                 **kwargs):
        super(SKConvBlock, self).__init__(**kwargs)
        self.num_branches = num_branches
        self.out_channels = out_channels
        self.data_format = data_format
        self.axis = get_channel_axis(data_format)
        mid_channels = max(in_channels // reduction, min_channels)

        # Branch i uses dilation i+1; the stacked outputs are fused below.
        self.branches = Concurrent(
            stack=True,
            data_format=data_format,
            name="branches")
        for i in range(num_branches):
            dilation = 1 + i
            self.branches.children.append(conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                padding=dilation,
                dilation=dilation,
                groups=groups,
                data_format=data_format,
                name="branch{}".format(i + 2)))
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.fc1 = conv1x1_block(
            in_channels=out_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="fc1")
        self.fc2 = conv1x1(
            in_channels=mid_channels,
            out_channels=(out_channels * num_branches),
            data_format=data_format,
            name="fc2")
        self.softmax = nn.Softmax(axis=self.axis)

    def call(self, x, training=None):
        # Forward `training` explicitly: branches and fc1 contain BatchNorm
        # (consistent with how other units in this package propagate it).
        y = self.branches(x, training=training)

        u = tf.math.reduce_sum(y, axis=self.axis)
        s = self.pool(u)
        if is_channels_first(self.data_format):
            s = tf.expand_dims(tf.expand_dims(s, 2), 3)
        else:
            s = tf.expand_dims(tf.expand_dims(s, 1), 2)
        z = self.fc1(s, training=training)
        w = self.fc2(z, training=training)

        # Reshape attention logits to (batch, branches, channels) layout
        # matching the stacked branch axis, then softmax over branches.
        if is_channels_first(self.data_format):
            w = tf.reshape(w, shape=(-1, self.num_branches, self.out_channels))
        else:
            w = tf.reshape(w, shape=(-1, self.out_channels, self.num_branches))
        w = self.softmax(w)
        if is_channels_first(self.data_format):
            w = tf.expand_dims(tf.expand_dims(w, 3), 4)
        else:
            w = tf.expand_dims(tf.expand_dims(w, 1), 2)

        y = y * w
        y = tf.math.reduce_sum(y, axis=self.axis)
        return y
class SKNetBottleneck(nn.Layer):
    """
    SKNet bottleneck block for residual path in SKNet unit:
    1x1 reduce -> selective-kernel conv -> 1x1 expand (no final activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor=2,
                 data_format="channels_last",
                 **kwargs):
        super(SKNetBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = SKConvBlock(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        # Forward `training` explicitly so the BatchNorm layers inside the
        # conv blocks run in the correct mode.
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class SKNetUnit(nn.Layer):
    """
    SKNet unit: bottleneck body plus identity (or 1x1-projected) shortcut,
    followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(SKNetUnit, self).__init__(**kwargs)
        # The shortcut needs a projection whenever shape or stride differs.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = SKNetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        # Propagate `training` so BatchNorm in the body and the projection
        # obeys the explicit flag (the original dropped it; sibling units in
        # this package, e.g. SPNASUnit, do pass it).
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class SKNet(tf.keras.Model):
    """
    SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SKNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Backbone: ResNet-style stem, one stage of SKNet units per entry of
        # `channels`, then a fixed-window average pooling.
        self.features = SimpleSequential(name="features")
        # ResInitBlock comes from a module-level import not visible in this
        # chunk (presumably from .resnet -- confirm at file top).
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 1.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SKNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 window with stride 1 matches the final feature map of a
        # 224x224 input (224 / 32 = 7); other input sizes would need changes.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """
        Forward pass: feature extractor, flatten, linear classifier.
        Returns logits of shape (batch, classes).
        """
        x = self.features(x, training=training)
        # `flatten` is a module-level helper honoring `data_format`.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_sknet(blocks,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SKNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported depth.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SKNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def sknet50(**kwargs):
    """
    SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
    The 50-layer member of the SKNet family.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_sknet(blocks=50, model_name="sknet50", **kwargs)
    return net
def sknet101(**kwargs):
    """
    SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
    The 101-layer member of the SKNet family.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_sknet(blocks=101, model_name="sknet101", **kwargs)
    return net
def sknet152(**kwargs):
    """
    SKNet-152 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # NOTE: the docstring previously cited the SENet paper (1709.01507) by
    # mistake; SKNet comes from Li et al., arXiv:1903.06586.
    return get_sknet(blocks=152, model_name="sknet152", **kwargs)
def _test():
    """Smoke test: build each SKNet variant and check output shape and size."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False
    batch = 14

    # Expected trainable-parameter counts per model factory.
    model_weight_counts = {
        sknet50: 27479784,
        sknet101: 48736040,
        sknet152: 66295656,
    }
    for model, expected_weight_count in model_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)


if __name__ == "__main__":
    _test()
| 13,222 | 31.09466 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/spnasnet.py | """
Single-Path NASNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
https://arxiv.org/abs/1904.02877.
"""
__all__ = ['SPNASNet', 'spnasnet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SimpleSequential, flatten,\
is_channels_first
class SPNASUnit(nn.Layer):
    """
    Single-Path NASNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int
        Expansion factor for each unit.
    use_skip : bool, default True
        Whether to use skip connection.
    activation : str, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_kernel3,
                 exp_factor,
                 use_skip=True,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(SPNASUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        # Residual shortcut only when shapes match and the caller allows it.
        self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
        # exp_factor == 1 means the pointwise expansion stage is skipped.
        self.use_exp_conv = exp_factor > 1
        mid_channels = exp_factor * in_channels
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=activation,
                data_format=data_format,
                name="exp_conv")
        # Depthwise stage: 3x3 or 5x5 kernel as chosen by the NAS search.
        # Both branches use the same layer name so checkpoints line up.
        if use_kernel3:
            self.conv1 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=activation,
                data_format=data_format,
                name="conv1")
        else:
            self.conv1 = dwconv5x5_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=activation,
                data_format=data_format,
                name="conv1")
        # Linear (no activation) pointwise projection.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """
        Forward pass: optional expansion, depthwise conv, projection,
        plus residual addition when enabled.
        """
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x, training=training)
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.residual:
            x = x + identity
        return x
class SPNASInitBlock(nn.Layer):
    """
    Single-Path NASNet initial block: a stride-2 3x3 conv stem followed by a
    single non-residual NAS unit without expansion.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 data_format="channels_last",
                 **kwargs):
        super(SPNASInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = SPNASUnit(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=1,
            use_kernel3=True,
            exp_factor=1,
            use_skip=False,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        # Apply the stem conv and the first NAS unit in sequence.
        for conv in (self.conv1, self.conv2):
            x = conv(x, training=training)
        return x
class SPNASFinalBlock(nn.Layer):
    """
    Single-Path NASNet final block: a non-residual NAS unit with 6x expansion
    followed by a 1x1 conv to the final feature width.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 data_format="channels_last",
                 **kwargs):
        super(SPNASFinalBlock, self).__init__(**kwargs)
        self.conv1 = SPNASUnit(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=1,
            use_kernel3=True,
            exp_factor=6,
            use_skip=False,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        # Apply the last NAS unit and the final pointwise conv in sequence.
        for conv in (self.conv1, self.conv2):
            x = conv(x, training=training)
        return x
class SPNASNet(tf.keras.Model):
    """
    Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
    https://arxiv.org/abs/1904.02877.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SPNASNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(SPNASInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels[1],
            mid_channels=init_block_channels[0],
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels[1]
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of stages 1-3; the last stage
                # (i == 3) instead downsamples at its middle unit.
                strides = 2 if ((j == 0) and (i != 3)) or \
                          ((j == len(channels_per_stage) // 2) and (i == 3)) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                stage.add(SPNASUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(SPNASFinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels[1],
            mid_channels=final_block_channels[0],
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels[1]
        # 7x7 window with stride 1 matches a 224x224 input (224 / 32 = 7).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """
        Forward pass: feature extractor, flatten, linear classifier.
        Returns logits of shape (batch, classes).
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_spnasnet(model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create Single-Path NASNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Architecture found by the NAS search: per-stage unit widths, kernel
    # choices (1 -> 3x3, 0 -> 5x5) and expansion factors.
    init_block_channels = [32, 16]
    final_block_channels = [320, 1280]
    channels = [[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80], [96, 96, 96, 96, 192, 192, 192, 192]]
    kernels3 = [[1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]]
    exp_factors = [[3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 6, 6, 6]]
    net = SPNASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 input shape so weights exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def spnasnet(**kwargs):
    """
    Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
    https://arxiv.org/abs/1904.02877.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_spnasnet(model_name="spnasnet", **kwargs)
    return net
def _test():
    """Smoke test: build SPNASNet and check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False
    batch = 14

    model_weight_counts = {
        spnasnet: 4421616,
    }
    for model, expected_weight_count in model_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)


if __name__ == "__main__":
    _test()
| 12,190 | 32.491758 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fastscnn.py | """
Fast-SCNN for image segmentation, implemented in TensorFlow.
Original paper: 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502.
"""
__all__ = ['FastSCNN', 'fastscnn_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwsconv3x3_block, Concurrent,\
InterpolationBlock, SimpleSequential, Identity, get_im_size, is_channels_first
class Stem(nn.Layer):
    """
    Fast-SCNN stem: one stride-2 3x3 conv followed by two stride-2 depthwise
    separable 3x3 convs.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels : tuple/list of 3 int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(Stem, self).__init__(**kwargs)
        assert (len(channels) == 3)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=channels[0],
            strides=2,
            padding=0,
            data_format=data_format,
            name="conv1")
        self.conv2 = dwsconv3x3_block(
            in_channels=channels[0],
            out_channels=channels[1],
            strides=2,
            data_format=data_format,
            name="conv2")
        self.conv3 = dwsconv3x3_block(
            in_channels=channels[1],
            out_channels=channels[2],
            strides=2,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        # Apply the three downsampling convolutions in sequence.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class LinearBottleneck(nn.Layer):
    """
    Fast-SCNN specific Linear Bottleneck layer from MobileNetV2:
    1x1 expand (6x), depthwise 3x3, linear 1x1 projection, with a residual
    shortcut when the shapes allow it.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(LinearBottleneck, self).__init__(**kwargs)
        self.residual = (in_channels == out_channels) and (strides == 1)
        mid_channels = in_channels * 6
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        identity = x if self.residual else None
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x + identity if self.residual else x
class FeatureExtractor(nn.Layer):
    """
    Fast-SCNN specific feature extractor/encoder.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels : list of list of int
        Number of output channels for each unit.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(FeatureExtractor, self).__init__(**kwargs)
        # Stack one stage of LinearBottleneck units per entry of `channels`.
        self.features = SimpleSequential(name="features")
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the last.
                strides = 2 if (j == 0) and (i != len(channels) - 1) else 1
                stage.add(LinearBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
    def call(self, x, training=None):
        x = self.features(x, training=training)
        return x
class PoolingBranch(nn.Layer):
    """
    Fast-SCNN specific pooling branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    down_size : int
        Spatial size of downscaled image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 down_size,
                 data_format="channels_last",
                 **kwargs):
        super(PoolingBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.down_size = down_size
        self.data_format = data_format
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.up = InterpolationBlock(
            scale_factor=None,
            out_size=in_size,
            data_format=data_format,
            name="up")
    def call(self, x, training=None):
        # When no static size was given, read the spatial size off the tensor.
        in_size = self.in_size if self.in_size is not None else get_im_size(x, data_format=self.data_format)
        # The pooling layer is created inside call() because its window depends
        # on the runtime input size; AveragePooling2D has no weights, so this
        # does not affect checkpoints (though it does re-instantiate per call).
        # NOTE(review): with strides=1 the pooled output is larger than
        # down_size x down_size for down_size > 1 -- presumably intended,
        # confirm against the reference implementation.
        x = nn.AveragePooling2D(pool_size=(in_size[0] // self.down_size, in_size[1] // self.down_size), strides=1,
                                data_format=self.data_format, name="pool")(x)
        x = self.conv(x, training=training)
        # Upsample back to the input size so branches can be concatenated.
        x = self.up(x, in_size)
        return x
class FastPyramidPooling(nn.Layer):
    """
    Fast-SCNN specific fast pyramid pooling block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(FastPyramidPooling, self).__init__(**kwargs)
        down_sizes = [1, 2, 3, 6]
        # Each pooling branch emits in_channels // 4 channels, so together
        # with the identity branch the concat yields in_channels * 2.
        mid_channels = in_channels // 4
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        # branch1 passes the input through unchanged.
        self.branches.add(Identity(name="branch1"))
        for i, down_size in enumerate(down_sizes):
            self.branches.add(PoolingBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                in_size=in_size,
                down_size=down_size,
                data_format=data_format,
                name="branch{}".format(i + 2)))
        self.conv = conv1x1_block(
            in_channels=(in_channels * 2),
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        return x
class FeatureFusion(nn.Layer):
    """
    Fast-SCNN specific feature fusion block.
    Parameters:
    ----------
    x_in_channels : int
        Number of high resolution (x) input channels.
    y_in_channels : int
        Number of low resolution (y) input channels.
    out_channels : int
        Number of output channels.
    x_in_size : tuple of 2 int or None
        Spatial size of high resolution (x) input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 x_in_channels,
                 y_in_channels,
                 out_channels,
                 x_in_size,
                 data_format="channels_last",
                 **kwargs):
        super(FeatureFusion, self).__init__(**kwargs)
        self.x_in_size = x_in_size
        self.data_format = data_format
        self.up = InterpolationBlock(
            scale_factor=None,
            out_size=x_in_size,
            data_format=data_format,
            name="up")
        self.low_dw_conv = dwconv3x3_block(
            in_channels=y_in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="low_dw_conv")
        self.low_pw_conv = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None,
            data_format=data_format,
            name="low_pw_conv")
        self.high_conv = conv1x1_block(
            in_channels=x_in_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None,
            data_format=data_format,
            name="high_conv")
        self.activ = nn.ReLU()
    def call(self, x, y, training=None):
        # Upsample the low-resolution branch `y` to the size of `x`, project
        # both branches to a common width, add, then apply ReLU.
        x_in_size = self.x_in_size if self.x_in_size is not None else get_im_size(x, data_format=self.data_format)
        y = self.up(y, x_in_size)
        y = self.low_dw_conv(y, training=training)
        y = self.low_pw_conv(y, training=training)
        x = self.high_conv(x, training=training)
        out = x + y
        return self.activ(out)
class Head(nn.Layer):
    """
    Fast-SCNN head (classifier) block: two depthwise-separable 3x3 convs,
    dropout, and a final 1x1 projection onto per-pixel class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 classes,
                 data_format="channels_last",
                 **kwargs):
        super(Head, self).__init__(**kwargs)
        self.conv1 = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            data_format=data_format,
            name="conv2")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv3 = conv1x1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        # Refine features, regularize, then emit class logits.
        for block in (self.conv1, self.conv2, self.dropout):
            x = block(x, training=training)
        return self.conv3(x)
class AuxHead(nn.Layer):
    """
    Fast-SCNN auxiliary (after stem) head (classifier) block: a 3x3 conv,
    dropout, and a 1x1 projection onto per-pixel class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    classes : int
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 classes,
                 data_format="channels_last",
                 **kwargs):
        super(AuxHead, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.dropout(x, training=training)
        return self.conv2(x)
class FastSCNN(tf.keras.Model):
    """
    Fast-SCNN from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502.
    Parameters:
    ----------
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 1024)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(1024, 1024),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(FastSCNN, self).__init__(**kwargs)
        assert (in_channels > 0)
        # The encoder downsamples by 32x in total, so both spatial dims
        # must be divisible by 32.
        assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format
        # Stem: three stride-2 convs, i.e. output at 1/8 input resolution.
        steam_channels = [32, 48, 64]
        self.stem = Stem(
            in_channels=in_channels,
            channels=steam_channels,
            data_format=data_format,
            name="stem")
        in_channels = steam_channels[-1]
        # Encoder: two more stride-2 stages bring resolution down to 1/32.
        feature_channels = [[64, 64, 64], [96, 96, 96], [128, 128, 128]]
        self.features = FeatureExtractor(
            in_channels=in_channels,
            channels=feature_channels,
            data_format=data_format,
            name="features")
        # Static sizes are only known when fixed_size is requested; otherwise
        # the blocks compute sizes from the tensors at call time.
        pool_out_size = (in_size[0] // 32, in_size[1] // 32) if fixed_size else None
        self.pool = FastPyramidPooling(
            in_channels=feature_channels[-1][-1],
            out_channels=feature_channels[-1][-1],
            in_size=pool_out_size,
            data_format=data_format,
            name="pool")
        fusion_out_size = (in_size[0] // 8, in_size[1] // 8) if fixed_size else None
        fusion_out_channels = 128
        # Fuses the 1/8-resolution stem output with the 1/32 encoder output.
        self.fusion = FeatureFusion(
            x_in_channels=steam_channels[-1],
            y_in_channels=feature_channels[-1][-1],
            out_channels=fusion_out_channels,
            x_in_size=fusion_out_size,
            data_format=data_format,
            name="fusion")
        self.head = Head(
            in_channels=fusion_out_channels,
            classes=classes,
            data_format=data_format,
            name="head")
        self.up = InterpolationBlock(
            scale_factor=None,
            out_size=in_size,
            data_format=data_format,
            name="up")
        if self.aux:
            self.aux_head = AuxHead(
                in_channels=64,
                mid_channels=64,
                classes=classes,
                data_format=data_format,
                name="aux_head")
    def call(self, x, training=None):
        """
        Forward pass. Returns the per-pixel logits at input resolution, or
        a (main, auxiliary) pair when `aux` is enabled.
        """
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        x = self.stem(x, training=training)
        y = self.features(x, training=training)
        y = self.pool(y, training=training)
        y = self.fusion(x, y, training=training)
        y = self.head(y, training=training)
        y = self.up(y, in_size)
        if self.aux:
            # Auxiliary head operates on the stem output (1/8 resolution).
            x = self.aux_head(x, training=training)
            x = self.up(x, in_size)
            return y, x
        return y
def get_fastscnn(model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create Fast-SCNN model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = FastSCNN(
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        # Package-relative import, consistent with the other model factories
        # in this package (a bare `from model_store import ...` fails when
        # the package directory is not on sys.path).
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 input shape so weights exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)
    return net
def fastscnn_cityscapes(classes=19, aux=True, **kwargs):
    """
    Fast-SCNN model for Cityscapes from 'Fast-SCNN: Fast Semantic Segmentation Network,'
    https://arxiv.org/abs/1902.04502.
    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # The model name selects the pretrained weight file in the model store.
    return get_fastscnn(classes=classes, aux=aux, model_name="fastscnn_cityscapes", **kwargs)
def _test():
    """Smoke test: build Fast-SCNN and check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (1024, 2048)
    aux = True
    fixed_size = False
    pretrained = True
    batch = 14

    for model, classes in [(fastscnn_cityscapes, 19)]:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size, data_format=data_format)
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        expected_weight_count = 1176278 if aux else 1138051
        assert (weight_count == expected_weight_count)


if __name__ == "__main__":
    _test()
| 19,829 | 31.831126 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/darknet.py | """
DarkNet for ImageNet-1K, implemented in TensorFlow.
Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
"""
__all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, conv1x1_block, conv3x3_block, MaxPool2d, SimpleSequential, flatten
def dark_convYxY(in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 data_format="channels_last",
                 **kwargs):
    """
    DarkNet unit: a 1x1 (pointwise) or 3x3 conv block with Leaky ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Both variants share every argument; only the kernel size differs.
    block_fn = conv1x1_block if pointwise else conv3x3_block
    return block_fn(
        in_channels=in_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(alpha=alpha),
        data_format=data_format,
        **kwargs)
class DarkNet(tf.keras.Model):
    """
    DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    odd_pointwise : bool
        Whether pointwise convolution layer is used for each odd unit.
    avg_pool_size : int
        Window size of the final average pooling.
    cls_activ : bool
        Whether classification convolution layer uses an activation.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 odd_pointwise,
                 avg_pool_size,
                 cls_activ,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Backbone: stages of conv units; every stage except the last is
        # followed by a 2x2 stride-2 max-pooling for downsampling.
        self.features = SimpleSequential(name="features")
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(dark_convYxY(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=alpha,
                    # Multi-unit stages alternate between 3x3 and 1x1 units;
                    # `odd_pointwise` selects which parity gets the 1x1 conv.
                    pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            if i != len(channels) - 1:
                stage.add(MaxPool2d(
                    pool_size=2,
                    strides=2,
                    data_format=data_format,
                    name="pool{}".format(i + 1)))
            self.features.add(stage)
        # Classifier head: 1x1 conv to `classes` channels, optional LeakyReLU,
        # then spatial average pooling (flattened in `call`).
        self.output1 = SimpleSequential(name="output1")
        self.output1.add(Conv2d(
            in_channels=in_channels,
            out_channels=classes,
            kernel_size=1,
            data_format=data_format,
            name="final_conv"))
        if cls_activ:
            self.output1.add(nn.LeakyReLU(alpha=alpha))
        self.output1.add(nn.AveragePooling2D(
            pool_size=avg_pool_size,
            strides=1,
            data_format=data_format,
            name="final_pool"))
    def call(self, x, training=None):
        """Forward pass: features -> conv classifier -> flatten to (batch, classes)."""
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_darknet(version,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create DarkNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of DarkNet ('ref', 'tiny' or '19').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-version architecture configuration.
    if version == 'ref':
        channels = [[16], [32], [64], [128], [256], [512], [1024]]
        odd_pointwise = False
        avg_pool_size = 3
        cls_activ = True
    elif version == 'tiny':
        channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]]
        odd_pointwise = True
        avg_pool_size = 14
        cls_activ = False
    elif version == '19':
        channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512],
                    [1024, 512, 1024, 512, 1024]]
        odd_pointwise = False
        avg_pool_size = 7
        cls_activ = False
    else:
        raise ValueError("Unsupported DarkNet version {}".format(version))
    net = DarkNet(
        channels=channels,
        odd_pointwise=odd_pointwise,
        avg_pool_size=avg_pool_size,
        cls_activ=cls_activ,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the model once with a concrete input shape so that all weight
        # variables exist before `load_weights` is called.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def darknet_ref(**kwargs):
    """
    Construct the DarkNet 'Reference' model (from 'Darknet: Open source neural
    networks in c,' https://github.com/pjreddie/darknet).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_darknet("ref", "darknet_ref", **kwargs)
def darknet_tiny(**kwargs):
    """
    Construct the DarkNet Tiny model (from 'Darknet: Open source neural
    networks in c,' https://github.com/pjreddie/darknet).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_darknet("tiny", "darknet_tiny", **kwargs)
def darknet19(**kwargs):
    """
    Construct the DarkNet-19 model (from 'Darknet: Open source neural
    networks in c,' https://github.com/pjreddie/darknet).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_darknet("19", "darknet19", **kwargs)
def _test():
    """Smoke-test: build each DarkNet variant, run a forward pass and check
    the output shape plus the expected trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    pretrained = False
    expected_weight_counts = {
        darknet_ref: 7319416,
        darknet_tiny: 1042104,
        darknet19: 20842376,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
if __name__ == "__main__":
    _test()
| 8,916 | 32.148699 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/dicenet.py | """
DiCENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516.
"""
__all__ = ['DiceNet', 'dicenet_wd5', 'dicenet_wd2', 'dicenet_w3d4', 'dicenet_w1', 'dicenet_w5d4', 'dicenet_w3d2',
'dicenet_w7d8', 'dicenet_w2']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, AvgPool2d, MaxPool2d, NormActivation,\
ChannelShuffle, Concurrent, PReLU2, SimpleSequential, is_channels_first, get_channel_axis, flatten
class SpatialDiceBranch(nn.Layer):
    """
    Spatial element of DiCE block for selected dimension.
    Parameters:
    ----------
    sp_size : int
        Desired size for selected spatial dimension.
    is_height : bool
        Is selected dimension height.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 sp_size,
                 is_height,
                 data_format="channels_last",
                 **kwargs):
        super(SpatialDiceBranch, self).__init__(**kwargs)
        self.is_height = is_height
        self.data_format = data_format
        if is_channels_first(self.data_format):
            self.index = 2 if is_height else 3
        else:
            self.index = 1 if is_height else 2
        self.base_sp_size = sp_size
        # Bug fix: `self.method` was read in `call` but never assigned, so any
        # input whose selected spatial size differed from `base_sp_size`
        # raised AttributeError. Use bilinear interpolation for the resizing.
        self.method = "bilinear"
        # Depthwise 3x3 conv over the selected spatial dimension (which is
        # swapped into the channel axis before the call).
        self.conv = conv3x3(
            in_channels=self.base_sp_size,
            out_channels=self.base_sp_size,
            groups=self.base_sp_size,
            data_format=data_format,
            name="conv")
    def _resize(self, x, size):
        # tf.image.resize expects NHWC, so transpose around it for NCHW data.
        if is_channels_first(self.data_format):
            x = tf.transpose(x, perm=[0, 2, 3, 1])
        x = tf.image.resize(
            images=x,
            size=size,
            method=self.method)
        if is_channels_first(self.data_format):
            x = tf.transpose(x, perm=[0, 3, 1, 2])
        return x
    def _swap_sp_and_channels(self, x):
        # Swap the selected spatial dimension with the channel dimension.
        # Each permutation is its own inverse, so the same helper is used
        # both before and after the convolution.
        if self.is_height:
            perm = (0, 2, 1, 3) if is_channels_first(self.data_format) else (0, 3, 2, 1)
        else:
            perm = (0, 3, 2, 1) if is_channels_first(self.data_format) else (0, 1, 3, 2)
        return tf.transpose(x, perm=perm)
    def call(self, x, training=None):
        """Apply a dimension-wise conv along the selected spatial axis,
        resizing to/from `base_sp_size` when the input size differs."""
        x_shape = x.get_shape().as_list()
        height, width = x_shape[2:4] if is_channels_first(self.data_format) else x_shape[1:3]
        if self.is_height:
            real_sp_size = height
            real_in_size = (real_sp_size, width)
            base_in_size = (self.base_sp_size, width)
        else:
            real_sp_size = width
            real_in_size = (height, real_sp_size)
            base_in_size = (height, self.base_sp_size)
        # Normalize the selected dimension to the size the conv was built for.
        if real_sp_size != self.base_sp_size:
            x = self._resize(x, base_in_size)
        x = self._swap_sp_and_channels(x)
        x = self.conv(x)
        x = self._swap_sp_and_channels(x)
        # Restore the original spatial size if it was changed above.
        changed_sp_size = x.shape[self.index]
        if real_sp_size != changed_sp_size:
            x = self._resize(x, real_in_size)
        return x
class DiceBaseBlock(nn.Layer):
    """
    Base part of DiCE block (without attention).
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(DiceBaseBlock, self).__init__(**kwargs)
        # Three parallel dimension-wise branches (channel / height / width)
        # are concatenated, hence 3x the channel count after `convs`.
        mid_channels = 3 * channels
        self.convs = Concurrent()
        self.convs.add(conv3x3(
            in_channels=channels,
            out_channels=channels,
            groups=channels,
            data_format=data_format,
            name="ch_conv"))
        self.convs.add(SpatialDiceBranch(
            sp_size=in_size[0],
            is_height=True,
            data_format=data_format,
            name="h_conv"))
        self.convs.add(SpatialDiceBranch(
            sp_size=in_size[1],
            is_height=False,
            data_format=data_format,
            name="w_conv"))
        self.norm_activ = NormActivation(
            in_channels=mid_channels,
            activation=(lambda: PReLU2(in_channels=mid_channels, name="activ")),
            data_format=data_format,
            name="norm_activ")
        # Shuffle mixes channels across the three branches before the grouped
        # squeeze convolution below.
        self.shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=3,
            data_format=data_format,
            name="shuffle")
        self.squeeze_conv = conv1x1_block(
            in_channels=mid_channels,
            out_channels=channels,
            groups=channels,
            activation=(lambda: PReLU2(in_channels=channels, name="activ")),
            data_format=data_format,
            name="squeeze_conv")
    def call(self, x, training=None):
        """Forward pass: 3 dim-wise branches -> BN+PReLU -> shuffle -> squeeze."""
        x = self.convs(x)
        x = self.norm_activ(x, training=training)
        x = self.shuffle(x)
        x = self.squeeze_conv(x, training=training)
        return x
class DiceAttBlock(nn.Layer):
    """
    Pure attention part of DiCE block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 4
        Squeeze reduction value.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=4,
                 data_format="channels_last",
                 **kwargs):
        super(DiceAttBlock, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = in_channels // reduction
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=False,
            data_format=data_format,
            name="conv1")
        self.activ = nn.ReLU()
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=False,
            data_format=data_format,
            name="conv2")
        self.sigmoid = tf.nn.sigmoid
    def call(self, x, training=None):
        """Return per-channel attention weights in [0, 1] with singleton spatial dims."""
        w = self.pool(x)
        # Global pooling drops the spatial dims; re-insert two singleton axes
        # (after the channel axis for NCHW, before it for NHWC) so the
        # squeeze/excite 1x1 convolutions can be applied.
        axis = -1 if is_channels_first(self.data_format) else 1
        w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis)
        w = self.conv1(w)
        w = self.activ(w)
        w = self.conv2(w)
        w = self.sigmoid(w)
        return w
class DiceBlock(nn.Layer):
    """
    DiCE block (volume-wise separable convolutions).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(DiceBlock, self).__init__(**kwargs)
        # gcd keeps the grouped projection valid for any channel combination.
        proj_groups = math.gcd(in_channels, out_channels)
        self.base_block = DiceBaseBlock(
            channels=in_channels,
            in_size=in_size,
            data_format=data_format,
            name="base_block")
        self.att = DiceAttBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="att")
        self.proj_conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=proj_groups,
            activation=(lambda: PReLU2(in_channels=out_channels, name="activ")),
            data_format=data_format,
            name="proj_conv")
    def call(self, x, training=None):
        """Forward pass: attention weights are computed from the base-block
        output and applied (broadcast multiply) to the projected features."""
        x = self.base_block(x, training=training)
        w = self.att(x, training=training)
        x = self.proj_conv(x, training=training)
        x = x * w
        return x
class StridedDiceLeftBranch(nn.Layer):
    """
    Left branch of the strided DiCE block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 data_format="channels_last",
                 **kwargs):
        super(StridedDiceLeftBranch, self).__init__(**kwargs)
        # Depthwise stride-2 3x3 conv (downsamples) followed by pointwise 1x1.
        self.conv1 = conv3x3_block(
            in_channels=channels,
            out_channels=channels,
            strides=2,
            groups=channels,
            activation=(lambda: PReLU2(in_channels=channels, name="activ")),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1_block(
            in_channels=channels,
            out_channels=channels,
            activation=(lambda: PReLU2(in_channels=channels, name="activ")),
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Forward pass: depthwise stride-2 conv, then pointwise conv."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class StridedDiceRightBranch(nn.Layer):
    """
    Right branch of the strided DiCE block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(StridedDiceRightBranch, self).__init__(**kwargs)
        # Stride-2 average pooling halves the resolution, so the DiCE block
        # below is built for the halved spatial size.
        self.pool = AvgPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
        self.dice = DiceBlock(
            in_channels=channels,
            out_channels=channels,
            in_size=(in_size[0] // 2, in_size[1] // 2),
            data_format=data_format,
            name="dice")
        self.conv = conv1x1_block(
            in_channels=channels,
            out_channels=channels,
            activation=(lambda: PReLU2(in_channels=channels, name="activ")),
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        """Forward pass: downsample by pooling, DiCE block, pointwise conv."""
        x = self.pool(x)
        x = self.dice(x, training=training)
        x = self.conv(x, training=training)
        return x
class StridedDiceBlock(nn.Layer):
    """
    Strided DiCE block (strided volume-wise separable convolutions).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(StridedDiceBlock, self).__init__(**kwargs)
        # The two stride-2 branches each keep `in_channels` channels and are
        # concatenated, so the output must be exactly twice the input width.
        assert (out_channels == 2 * in_channels)
        self.branches = Concurrent()
        self.branches.add(StridedDiceLeftBranch(
            channels=in_channels,
            data_format=data_format,
            name="left_branch"))
        self.branches.add(StridedDiceRightBranch(
            channels=in_channels,
            in_size=in_size,
            data_format=data_format,
            name="right_branch"))
        self.shuffle = ChannelShuffle(
            channels=out_channels,
            groups=2,
            data_format=data_format,
            name="shuffle")
    def call(self, x, training=None):
        """Forward pass: concat both stride-2 branches, then shuffle channels."""
        x = self.branches(x, training=training)
        x = self.shuffle(x)
        return x
class ShuffledDiceRightBranch(nn.Layer):
    """
    Right branch of the shuffled DiCE block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffledDiceRightBranch, self).__init__(**kwargs)
        # Pointwise conv adapts the channel count before the DiCE block.
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=(lambda: PReLU2(in_channels=out_channels, name="activ")),
            data_format=data_format,
            name="conv")
        self.dice = DiceBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            in_size=in_size,
            data_format=data_format,
            name="dice")
    def call(self, x, training=None):
        """Forward pass: pointwise conv, then DiCE block."""
        x = self.conv(x, training=training)
        x = self.dice(x, training=training)
        return x
class ShuffledDiceBlock(nn.Layer):
    """
    Shuffled DiCE block (shuffled volume-wise separable convolutions).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffledDiceBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # Split input channels roughly in half: the left part is passed
        # through untouched, the right part goes through the DiCE branch.
        self.left_part = in_channels - in_channels // 2
        right_in_channels = in_channels - self.left_part
        right_out_channels = out_channels - self.left_part
        self.right_branch = ShuffledDiceRightBranch(
            in_channels=right_in_channels,
            out_channels=right_out_channels,
            in_size=in_size,
            data_format=data_format,
            name="right_branch")
        self.shuffle = ChannelShuffle(
            channels=(2 * right_out_channels),
            groups=2,
            data_format=data_format,
            name="shuffle")
    def call(self, x, training=None):
        """Forward pass: transform half the channels, concat with the passthrough
        half, then shuffle channels across the two groups."""
        axis = get_channel_axis(self.data_format)
        x1, x2 = tf.split(x, num_or_size_splits=2, axis=axis)
        x2 = self.right_branch(x2, training=training)
        x = tf.concat([x1, x2], axis=axis)
        x = self.shuffle(x)
        return x
class DiceInitBlock(nn.Layer):
    """
    DiceNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(DiceInitBlock, self).__init__(**kwargs)
        # Stride-2 conv followed by stride-2 max-pool: 4x total downsampling.
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            activation=(lambda: PReLU2(in_channels=out_channels, name="activ")),
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        """Forward pass: stride-2 conv, then stride-2 max-pooling."""
        x = self.conv(x, training=training)
        x = self.pool(x)
        return x
class DiceClassifier(nn.Layer):
    """
    DiceNet specific classifier block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    classes : int, default 1000
        Number of classification classes.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 classes,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(DiceClassifier, self).__init__(**kwargs)
        self.data_format = data_format
        # Grouped 1x1 conv works as a cheap (4-way grouped) fully-connected layer.
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=4,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=dropout_rate,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Forward pass. Input is the globally pooled feature vector; two
        singleton spatial dims are re-inserted so 1x1 convs can be applied."""
        axis = -1 if is_channels_first(self.data_format) else 1
        x = tf.expand_dims(tf.expand_dims(x, axis=axis), axis=axis)
        x = self.conv1(x)
        x = self.dropout(x, training=training)
        x = self.conv2(x)
        return x
class DiceNet(tf.keras.Model):
    """
    DiCENet model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    dropout_rate : float
        Parameter of Dropout layer in classifier. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 classifier_mid_channels,
                 dropout_rate,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DiceNet, self).__init__(**kwargs)
        # DiCE branches are built for fixed spatial sizes, so the input must
        # be divisible by the total downsampling factor.
        assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(DiceInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # The init block downsamples by 4; `in_size` tracks the current
        # spatial size so each unit is built for the right resolution.
        in_size = (in_size[0] // 4, in_size[1] // 4)
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of each stage is strided (downsamples by 2).
                unit_class = StridedDiceBlock if j == 0 else ShuffledDiceBlock
                stage.add(unit_class(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    in_size=in_size,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
                in_size = (in_size[0] // 2, in_size[1] // 2) if j == 0 else in_size
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))
        self.output1 = DiceClassifier(
            in_channels=in_channels,
            mid_channels=classifier_mid_channels,
            classes=classes,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="output1")
    def call(self, x, training=None):
        """Forward pass: features (ending in global pooling) -> classifier -> flatten."""
        x = self.features(x, training=training)
        x = self.output1(x, training=training)
        x = flatten(x, self.data_format)
        return x
def get_dicenet(width_scale,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create DiCENet model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-stage channel widths for each supported width scale.
    channels_per_layers_dict = {
        0.2: [32, 64, 128],
        0.5: [48, 96, 192],
        0.75: [86, 172, 344],
        1.0: [116, 232, 464],
        1.25: [144, 288, 576],
        1.5: [176, 352, 704],
        1.75: [210, 420, 840],
        2.0: [244, 488, 976],
        2.4: [278, 556, 1112],
    }
    if width_scale not in channels_per_layers_dict.keys():
        raise ValueError("Unsupported DiceNet with width scale: {}".format(width_scale))
    channels_per_layers = channels_per_layers_dict[width_scale]
    layers = [3, 7, 3]
    if width_scale > 0.2:
        init_block_channels = 24
    else:
        init_block_channels = 16
    channels = [[ci] * li for i, (ci, li) in enumerate(zip(channels_per_layers, layers))]
    # Prepend each stage with a strided unit whose output width is twice the
    # previous stage's last width (required by StridedDiceBlock's assertion).
    for i in range(len(channels)):
        pred_channels = channels[i - 1][-1] if i != 0 else init_block_channels
        channels[i] = [pred_channels * 2] + channels[i]
    if width_scale > 2.0:
        classifier_mid_channels = 1280
    else:
        classifier_mid_channels = 1024
    if width_scale > 1.0:
        dropout_rate = 0.2
    else:
        dropout_rate = 0.1
    net = DiceNet(
        channels=channels,
        init_block_channels=init_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        dropout_rate=dropout_rate,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the model once with a concrete input shape so that all weight
        # variables exist before `load_weights` is called.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def dicenet_wd5(**kwargs):
    """
    Construct the DiCENet x0.2 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(0.2, "dicenet_wd5", **kwargs)
def dicenet_wd2(**kwargs):
    """
    Construct the DiCENet x0.5 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(0.5, "dicenet_wd2", **kwargs)
def dicenet_w3d4(**kwargs):
    """
    Construct the DiCENet x0.75 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(0.75, "dicenet_w3d4", **kwargs)
def dicenet_w1(**kwargs):
    """
    Construct the DiCENet x1.0 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(1.0, "dicenet_w1", **kwargs)
def dicenet_w5d4(**kwargs):
    """
    Construct the DiCENet x1.25 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(1.25, "dicenet_w5d4", **kwargs)
def dicenet_w3d2(**kwargs):
    """
    Construct the DiCENet x1.5 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(1.5, "dicenet_w3d2", **kwargs)
def dicenet_w7d8(**kwargs):
    """
    Construct the DiCENet x1.75 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(1.75, "dicenet_w7d8", **kwargs)
def dicenet_w2(**kwargs):
    """
    Construct the DiCENet x2.0 model ('DiCENet: Dimension-wise Convolutions
    for Efficient Networks,' https://arxiv.org/abs/1906.03516).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dicenet(2.0, "dicenet_w2", **kwargs)
def _test():
    """Smoke-test: build each DiCENet variant, run a forward pass and check
    the output shape plus the expected trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    expected_weight_counts = {
        dicenet_wd5: 1130704,
        dicenet_wd2: 1214120,
        dicenet_w3d4: 1495676,
        dicenet_w1: 1805604,
        dicenet_w5d4: 2162888,
        dicenet_w3d2: 2652200,
        dicenet_w7d8: 3264932,
        dicenet_w2: 3979044,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
if __name__ == "__main__":
    _test()
| 29,544 | 31.431394 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/nvpattexp.py | """
Neural Voice Puppetry Audio-to-Expression net for speech-driven facial animation, implemented in TensorFlow.
Original paper: 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.
"""
__all__ = ['NvpAttExp', 'nvpattexp116bazel76']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import DenseBlock, ConvBlock, ConvBlock1d, SelectableDense, SimpleSequential, is_channels_first
class NvpAttExpEncoder(nn.Layer):
"""
Neural Voice Puppetry Audio-to-Expression encoder.
Parameters:
----------
audio_features : int
Number of audio features (characters/sounds).
audio_window_size : int
Size of audio window (for time related audio features).
seq_len : int, default
Size of feature window.
encoder_features : int
Number of encoder features.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
audio_features,
audio_window_size,
seq_len,
encoder_features,
data_format="channels_last",
**kwargs):
super(NvpAttExpEncoder, self).__init__(**kwargs)
self.audio_features = audio_features
self.audio_window_size = audio_window_size
self.seq_len = seq_len
self.data_format = data_format
conv_channels = (32, 32, 64, 64)
conv_slopes = (0.02, 0.02, 0.2, 0.2)
fc_channels = (128, 64, encoder_features)
fc_slopes = (0.02, 0.02, None)
att_conv_channels = (16, 8, 4, 2, 1)
att_conv_slopes = 0.02
in_channels = audio_features
self.conv_branch = SimpleSequential(name="conv_branch")
for i, (out_channels, slope) in enumerate(zip(conv_channels, conv_slopes)):
self.conv_branch.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=(2, 1),
padding=(1, 0),
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=slope),
data_format=data_format,
name="conv{}".format(i + 1)))
in_channels = out_channels
self.fc_branch = SimpleSequential(name="fc_branch")
for i, (out_channels, slope) in enumerate(zip(fc_channels, fc_slopes)):
activation = nn.LeakyReLU(alpha=slope) if slope is not None else "tanh"
self.fc_branch.add(DenseBlock(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
use_bn=False,
activation=activation,
data_format=data_format,
name="fc{}".format(i + 1)))
in_channels = out_channels
self.att_conv_branch = SimpleSequential(name="att_conv_branch")
for i, out_channels, in enumerate(att_conv_channels):
self.att_conv_branch.add(ConvBlock1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=att_conv_slopes),
data_format=data_format,
name="att_conv{}".format(i + 1)))
in_channels = out_channels
self.att_fc = DenseBlock(
in_channels=seq_len,
out_channels=seq_len,
use_bias=True,
use_bn=False,
activation=nn.Softmax(axis=1),
data_format=data_format,
name="att_fc")
    def call(self, x, training=None):
        """
        Encode an audio sequence into an attention-pooled expression code.

        Returns a tuple `(x, y)`: the attention-weighted sum of per-frame
        codes and the code of the central frame of the window (layouts
        implied by the reshapes below).
        """
        # NOTE(review): x.shape[0] may be None in graph mode; this relies on a
        # statically known batch size -- confirm callers.
        batch = x.shape[0]
        batch_seq_len = batch * self.seq_len
        if is_channels_first(self.data_format):
            # Fold (batch, seq) together and add a singleton channel axis,
            # then move features/window into the spatial dims for the CNN.
            x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features))
            x = tf.transpose(x, perm=(0, 3, 2, 1))
            x = self.conv_branch(x)
            x = tf.squeeze(x, axis=-1)
            x = tf.reshape(x, shape=(batch_seq_len, 1, -1))
            x = self.fc_branch(x)
            # Back to (batch, code, seq) for attention over the seq axis.
            x = tf.reshape(x, shape=(batch, self.seq_len, -1))
            x = tf.transpose(x, perm=(0, 2, 1))
            # Expression code of the central frame of the window.
            y = x[:, :, (self.seq_len // 2)]
            # Attention weights over the seq_len frames (softmax via att_fc).
            w = self.att_conv_branch(x)
            w = tf.squeeze(w, axis=1)
            w = self.att_fc(w)
            w = tf.expand_dims(w, axis=-1)
        else:
            # channels_last: bring seq to the front, fold it into the batch,
            # then restore a CNN-friendly layout.
            x = tf.transpose(x, perm=(0, 3, 1, 2))
            x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features))
            x = tf.transpose(x, perm=(0, 2, 3, 1))
            x = tf.transpose(x, perm=(0, 1, 3, 2))
            x = self.conv_branch(x)
            x = tf.squeeze(x, axis=1)
            x = self.fc_branch(x)
            x = tf.reshape(x, shape=(batch, self.seq_len, -1))
            # Expression code of the central frame of the window.
            y = x[:, (self.seq_len // 2), :]
            # Attention weights over the seq_len frames.
            w = self.att_conv_branch(x)
            w = tf.squeeze(w, axis=-1)
            w = self.att_fc(w)
            w = tf.expand_dims(w, axis=-1)
            x = tf.transpose(x, perm=(0, 2, 1))
        # Weighted sum of per-frame codes: (batch, code, seq) x (batch, seq, 1).
        x = tf.keras.backend.batch_dot(x, w)
        x = tf.squeeze(x, axis=-1)
        return x, y
class NvpAttExp(tf.keras.Model):
    """
    Neural Voice Puppetry Audio-to-Expression model from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,'
    https://arxiv.org/abs/1912.05566.

    The encoder maps an audio window to an expression code, which a
    per-person selectable linear decoder turns into blendshape coefficients.

    Parameters:
    ----------
    audio_features : int, default 29
        Number of audio features (characters/sounds).
    audio_window_size : int, default 16
        Size of audio window (for time related audio features).
    seq_len : int, default 8
        Size of feature window.
    base_persons : int, default 116
        Number of base persons (identities).
    blendshapes : int, default 76
        Number of 3D model blendshapes.
    encoder_features : int, default 32
        Number of encoder features.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 audio_features=29,
                 audio_window_size=16,
                 seq_len=8,
                 base_persons=116,
                 blendshapes=76,
                 encoder_features=32,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format
        self.base_persons = base_persons
        self.encoder = NvpAttExpEncoder(
            audio_features=audio_features,
            audio_window_size=audio_window_size,
            seq_len=seq_len,
            encoder_features=encoder_features,
            data_format=data_format,
            name="encoder")
        self.decoder = SelectableDense(
            in_channels=encoder_features,
            out_channels=blendshapes,
            use_bias=False,
            num_options=base_persons,
            name="decoder")

    def call(self, x, pid, training=None):
        # Decode both the attention-pooled code and the central-frame code
        # with the weights selected by the person id.
        pooled, central = self.encoder(x, training=training)
        return self.decoder(pooled, pid), self.decoder(central, pid)
def get_nvpattexp(base_persons,
                  blendshapes,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create Neural Voice Puppetry Audio-to-Expression model with specific parameters.

    Parameters:
    ----------
    base_persons : int
        Number of base persons (subjects).
    blendshapes : int
        Number of 3D model blendshapes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    NvpAttExp
        The constructed (and optionally pretrained) network.
    """
    net = NvpAttExp(
        base_persons=base_persons,
        blendshapes=blendshapes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Bug fix: the original built the input shape from `net.in_size`, an
        # attribute NvpAttExp never defines (copy-pasted from the image
        # models), so `pretrained=True` always raised AttributeError.
        # Derive the audio input shape from the encoder configuration instead,
        # matching the layouts exercised in `_test`.
        enc = net.encoder
        if net.data_format == "channels_first":
            input_shape = (1, enc.seq_len, enc.audio_window_size, enc.audio_features)
        else:
            input_shape = (1, enc.audio_window_size, enc.audio_features, enc.seq_len)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def nvpattexp116bazel76(**kwargs):
    """
    Neural Voice Puppetry Audio-to-Expression model for 116 base persons and Bazel topology with 76 blendshapes from
    'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_nvpattexp(
        base_persons=116,
        blendshapes=76,
        model_name="nvpattexp116bazel76",
        **kwargs)
def _test():
    """Smoke-test the model: output shapes and reference parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    # data_format = "channels_first"
    data_format = "channels_last"
    pretrained = False

    for model in [nvpattexp116bazel76]:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        seq_len = 8
        audio_window_size = 16
        audio_features = 29
        blendshapes = 76
        shape = ((batch, seq_len, audio_window_size, audio_features)
                 if is_channels_first(data_format)
                 else (batch, audio_window_size, audio_features, seq_len))
        x = tf.random.normal(shape)
        pid = tf.fill(dims=(batch,), value=3)
        y1, y2 = net(x, pid)
        assert (y1.shape == y2.shape == (batch, blendshapes))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != nvpattexp116bazel76 or weight_count == 327397)
| 10,488 | 34.435811 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/alexnet.py | """
AlexNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet', 'alexnetb']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import ConvBlock, MaxPool2d, SimpleSequential, flatten, is_channels_first
class AlexConv(ConvBlock):
    """
    AlexNet specific convolution block: a biased convolution without batch
    normalization, optionally followed by local response normalization (LRN).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_lrn : bool
        Whether to use LRN layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_lrn,
                 data_format="channels_last",
                 **kwargs):
        # Original AlexNet predates batch norm, hence use_bias=True/use_bn=False.
        super(AlexConv, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            **kwargs)
        self.use_lrn = use_lrn

    def call(self, x, training=None):
        x = super(AlexConv, self).call(x, training=training)
        if self.use_lrn:
            # NOTE(review): tf.nn.lrn normalizes over the last tensor axis,
            # which is the channel axis only for channels_last data; confirm
            # intended behavior before using this with channels_first input.
            x = tf.nn.lrn(x, bias=2, alpha=1e-4, beta=0.75)
        return x
class AlexDense(nn.Layer):
    """
    AlexNet specific dense block: fully-connected layer, ReLU, and 0.5
    dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super().__init__(**kwargs)
        self.fc = nn.Dense(
            units=out_channels,
            input_dim=in_channels,
            name="fc")
        self.activ = nn.ReLU()
        self.dropout = nn.Dropout(
            rate=0.5,
            name="dropout")

    def call(self, x, training=None):
        # Dropout is only active in training mode.
        return self.dropout(self.activ(self.fc(x)), training=training)
class AlexOutputBlock(nn.Layer):
    """
    AlexNet specific output block: two dropout-regularized 4096-wide FC
    layers followed by the final linear classifier.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super().__init__(**kwargs)
        mid_channels = 4096
        self.fc1 = AlexDense(
            in_channels=in_channels,
            out_channels=mid_channels,
            name="fc1")
        self.fc2 = AlexDense(
            in_channels=mid_channels,
            out_channels=mid_channels,
            name="fc2")
        self.fc3 = nn.Dense(
            units=classes,
            input_dim=mid_channels,
            name="fc3")

    def call(self, x, training=None):
        h = self.fc1(x, training=training)
        h = self.fc2(h, training=training)
        return self.fc3(h)
class AlexNet(tf.keras.Model):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    kernel_sizes : list of list of int
        Convolution window sizes for each unit.
    strides : list of list of int or tuple/list of 2 int
        Strides of the convolution for each unit.
    paddings : list of list of int or tuple/list of 2 int
        Padding value for convolution layer for each unit.
    use_lrn : bool
        Whether to use LRN layer.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 strides,
                 paddings,
                 use_lrn,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(AlexNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        for i, channels_per_stage in enumerate(channels):
            # LRN is only applied in the first two stages (classic AlexNet).
            use_lrn_i = use_lrn and (i in [0, 1])
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(AlexConv(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_sizes[i][j],
                    strides=strides[i][j],
                    padding=paddings[i][j],
                    use_lrn=use_lrn_i,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            # Every stage ends with overlapping 3x3/2 max pooling (ceil mode).
            stage.add(MaxPool2d(
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True,
                data_format=data_format,
                name="pool{}".format(i + 1)))
            self.features.add(stage)
        # Classifier input size assumes a 6x6 final feature map, i.e. a
        # 224x224 input -- TODO confirm for other in_size values.
        in_channels = in_channels * 6 * 6
        self.output1 = AlexOutputBlock(
            in_channels=in_channels,
            classes=classes,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x, training=training)
        return x
def get_alexnet(version="a",
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create AlexNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of AlexNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # The two versions share conv geometry and differ only in channel
    # widths, first-layer padding, and the use of LRN.
    if version == "a":
        channels = [[96], [256], [384, 384, 256]]
        paddings = [[0], [2], [1, 1, 1]]
        use_lrn = True
    elif version == "b":
        channels = [[64], [192], [384, 256, 256]]
        paddings = [[2], [2], [1, 1, 1]]
        use_lrn = False
    else:
        raise ValueError("Unsupported AlexNet version {}".format(version))
    kernel_sizes = [[11], [5], [3, 3, 3]]
    strides = [[4], [1], [1, 1, 1]]

    net = AlexNet(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def alexnet(**kwargs):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_alexnet(
        model_name="alexnet",
        **kwargs)
def alexnetb(**kwargs):
    """
    AlexNet-b model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997. Non-standard version.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_alexnet(
        version="b",
        model_name="alexnetb",
        **kwargs)
def _test():
    """Smoke-test AlexNet variants: forward shape and parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    models = [
        alexnet,
        alexnetb,
    ]

    for model in models:
        # Bug fix: pass data_format through to the model (the original
        # dropped it, so flipping the data_format switch above would
        # desynchronize the network layout from the generated input tensor).
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        # Reference counts pin the architecture against accidental changes.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != alexnet or weight_count == 62378344)
        assert (model != alexnetb or weight_count == 61100840)
| 10,247 | 29.960725 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mobilenet_cub.py | """
MobileNet & FD-MobileNet for CUB-200-2011, implemented in TensorFlow.
Original papers:
- 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
- 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
"""
__all__ = ['mobilenet_w1_cub', 'mobilenet_w3d4_cub', 'mobilenet_wd2_cub', 'mobilenet_wd4_cub', 'fdmobilenet_w1_cub',
'fdmobilenet_w3d4_cub', 'fdmobilenet_wd2_cub', 'fdmobilenet_wd4_cub']
from .common import is_channels_first
from .mobilenet import get_mobilenet
from .fdmobilenet import get_fdmobilenet
def mobilenet_w1_cub(classes=200, **kwargs):
    """
    1.0 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
    Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(
        width_scale=1.0,
        classes=classes,
        model_name="mobilenet_w1_cub",
        **kwargs)
def mobilenet_w3d4_cub(classes=200, **kwargs):
    """
    0.75 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
    Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(
        width_scale=0.75,
        classes=classes,
        model_name="mobilenet_w3d4_cub",
        **kwargs)
def mobilenet_wd2_cub(classes=200, **kwargs):
    """
    0.5 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
    Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(
        width_scale=0.5,
        classes=classes,
        model_name="mobilenet_wd2_cub",
        **kwargs)
def mobilenet_wd4_cub(classes=200, **kwargs):
    """
    0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
    Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenet(
        width_scale=0.25,
        classes=classes,
        model_name="mobilenet_wd4_cub",
        **kwargs)
def fdmobilenet_w1_cub(classes=200, **kwargs):
    """
    FD-MobileNet 1.0x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling
    Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fdmobilenet(
        width_scale=1.0,
        classes=classes,
        model_name="fdmobilenet_w1_cub",
        **kwargs)
def fdmobilenet_w3d4_cub(classes=200, **kwargs):
    """
    FD-MobileNet 0.75x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling
    Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fdmobilenet(
        width_scale=0.75,
        classes=classes,
        model_name="fdmobilenet_w3d4_cub",
        **kwargs)
def fdmobilenet_wd2_cub(classes=200, **kwargs):
    """
    FD-MobileNet 0.5x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling
    Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fdmobilenet(
        width_scale=0.5,
        classes=classes,
        model_name="fdmobilenet_wd2_cub",
        **kwargs)
def fdmobilenet_wd4_cub(classes=200, **kwargs):
    """
    FD-MobileNet 0.25x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling
    Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_fdmobilenet(
        width_scale=0.25,
        classes=classes,
        model_name="fdmobilenet_wd4_cub",
        **kwargs)
def _test():
    """Smoke-test each CUB MobileNet/FD-MobileNet variant: forward shape and
    reference parameter count."""
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    models = [
        mobilenet_w1_cub,
        mobilenet_w3d4_cub,
        mobilenet_wd2_cub,
        mobilenet_wd4_cub,
        fdmobilenet_w1_cub,
        fdmobilenet_w3d4_cub,
        fdmobilenet_wd2_cub,
        fdmobilenet_wd4_cub,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 200))

        # Reference counts pin each architecture against accidental changes.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != mobilenet_w1_cub or weight_count == 3411976)
        assert (model != mobilenet_w3d4_cub or weight_count == 1970360)
        assert (model != mobilenet_wd2_cub or weight_count == 921192)
        assert (model != mobilenet_wd4_cub or weight_count == 264472)
        assert (model != fdmobilenet_w1_cub or weight_count == 2081288)
        assert (model != fdmobilenet_w3d4_cub or weight_count == 1218104)
        assert (model != fdmobilenet_wd2_cub or weight_count == 583528)
        assert (model != fdmobilenet_wd4_cub or weight_count == 177560)
| 7,245 | 35.969388 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/wrn.py | """
WRN for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['WRN', 'wrn50_2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, MaxPool2d, SimpleSequential, flatten, is_channels_first
class WRNConv(nn.Layer):
    """
    WRN specific convolution block: a biased convolution with an optional
    trailing ReLU (no batch normalization).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool
        Whether to append the ReLU activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 activate,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.activate = activate
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=True,
            data_format=data_format,
            name="conv")
        if self.activate:
            self.activ = nn.ReLU()

    def call(self, x, training=None):
        y = self.conv(x)
        return self.activ(y) if self.activate else y
def wrn_conv1x1(in_channels,
                out_channels,
                strides,
                activate,
                data_format="channels_last",
                **kwargs):
    """
    1x1 (pointwise, zero-padding) version of the WRN convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether to append the ReLU activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return WRNConv(
        kernel_size=1,
        padding=0,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        activate=activate,
        data_format=data_format,
        **kwargs)
def wrn_conv3x3(in_channels,
                out_channels,
                strides,
                activate,
                data_format="channels_last",
                **kwargs):
    """
    3x3 (unit-padding) version of the WRN convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether to append the ReLU activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return WRNConv(
        kernel_size=3,
        padding=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        activate=activate,
        data_format=data_format,
        **kwargs)
class WRNBottleneck(nn.Layer):
    """
    WRN bottleneck block for the residual path in a WRN unit:
    1x1 reduce -> 3x3 -> 1x1 expand, with the last conv left unactivated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    width_factor : float
        Wide scale factor for width of layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 width_factor,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        # Standard bottleneck width (out/4), widened by width_factor.
        mid_channels = int(round(out_channels // 4 * width_factor))
        self.conv1 = wrn_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=1,
            activate=True,
            data_format=data_format,
            name="conv1")
        self.conv2 = wrn_conv3x3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activate=True,
            data_format=data_format,
            name="conv2")
        self.conv3 = wrn_conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=1,
            activate=False,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class WRNUnit(nn.Layer):
    """
    WRN unit: bottleneck residual branch added to an (optionally projected)
    identity shortcut, followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    width_factor : float
        Wide scale factor for width of layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 width_factor,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        # A projection shortcut is needed whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = WRNBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            width_factor=width_factor,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = wrn_conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activate=False,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        return self.activ(self.body(x, training=training) + identity)
class WRNInitBlock(nn.Layer):
    """
    WRN specific initial block: 7x7/2 convolution followed by 3x3/2 max
    pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.conv = WRNConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            activate=True,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(self.conv(x, training=training))
class WRN(tf.keras.Model):
    """
    WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    width_factor : float
        Wide scale factor for width of layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 width_factor,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(WRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(WRNInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage but the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(WRNUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    width_factor=width_factor,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Assumes a 7x7 final feature map, i.e. a 224x224 input -- TODO
        # confirm for other in_size values.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_wrn(blocks,
            width_factor,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create WRN model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_factor : float
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Stage depths for the supported ResNet-style configurations.
    depths = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depths:
        raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks))
    layers = depths[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = WRN(
        channels=channels,
        init_block_channels=init_block_channels,
        width_factor=width_factor,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def wrn50_2(**kwargs):
    """
    WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_wrn(
        blocks=50,
        width_factor=2.0,
        model_name="wrn50_2",
        **kwargs)
def _test():
    """Smoke-test the WRN variants: forward shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    models = [
        wrn50_2,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        # Reference count pins the architecture against accidental changes.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != wrn50_2 or weight_count == 68849128)
| 13,742 | 28.941176 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/inceptionv3.py | """
InceptionV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Rethinking the Inception Architecture for Computer Vision,'
https://arxiv.org/abs/1512.00567.
"""
__all__ = ['InceptionV3', 'inceptionv3', 'MaxPoolBranch', 'AvgPoolBranch', 'Conv1x1Branch', 'ConvSeqBranch']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, AvgPool2d, ConvBlock, conv1x1_block, conv3x3_block, SimpleSequential, Concurrent,\
flatten, is_channels_first, get_channel_axis
class MaxPoolBranch(nn.Layer):
    """
    Inception specific max pooling branch block (3x3/2 max pooling, no
    padding).

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super().__init__(**kwargs)
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(x)
class AvgPoolBranch(nn.Layer):
    """
    Inception specific average pooling branch block: 3x3/1 average pooling
    followed by a 1x1 convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    count_include_pad : bool, default True
        Whether to include the zero-padding in the averaging calculation.
        NOTE(review): this flag is accepted but never forwarded to AvgPool2d;
        confirm whether it is intentionally unsupported here.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 count_include_pad=True,
                 data_format="channels_last",
                 **kwargs):
        super(AvgPoolBranch, self).__init__(**kwargs)
        # Always-true expression; it only keeps `count_include_pad`
        # referenced and has no runtime effect.
        assert (count_include_pad or not count_include_pad)
        self.pool = AvgPool2d(
            pool_size=3,
            strides=1,
            padding=1,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.pool(x)
        x = self.conv(x, training=training)
        return x
class Conv1x1Branch(nn.Layer):
    """
    Inception specific convolutional 1x1 branch block.

    A single 1x1 conv block (pointwise channel projection).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(Conv1x1Branch, self).__init__(**kwargs)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.conv(x, training=training)
        return x
class ConvSeqBranch(nn.Layer):
    """
    Inception specific convolutional sequence branch block.

    Stacks one ConvBlock per entry of the parallel per-layer config lists;
    all four lists must have the same length.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ConvSeqBranch, self).__init__(**kwargs)
        assert (len(out_channels_list) == len(kernel_size_list))
        assert (len(out_channels_list) == len(strides_list))
        assert (len(out_channels_list) == len(padding_list))

        self.conv_list = SimpleSequential(name="conv_list")
        for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                out_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.children.append(ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                bn_eps=bn_eps,
                data_format=data_format,
                name="conv{}".format(i + 1)))
            # Chain: each conv consumes the previous conv's output channels.
            in_channels = out_channels

    def call(self, x, training=None):
        x = self.conv_list(x, training=training)
        return x
class ConvSeq3x3Branch(nn.Layer):
    """
    InceptionV3 specific convolutional sequence branch block with splitting by 3x3.

    Like ConvSeqBranch, but the output of the conv stack is fed to two
    parallel 1x3 and 3x1 convolutions whose outputs are concatenated along
    the channel axis (so the branch outputs 2x the stack's channel count).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ConvSeq3x3Branch, self).__init__(**kwargs)
        self.data_format = data_format

        self.conv_list = SimpleSequential(name="conv_list")
        for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                out_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.children.append(ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                bn_eps=bn_eps,
                data_format=data_format,
                name="conv{}".format(i + 1)))
            in_channels = out_channels
        # Final 1x3 / 3x1 pair: factorized 3x3, applied in parallel (not
        # sequentially) and concatenated below.
        self.conv1x3 = ConvBlock(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=(1, 3),
            strides=1,
            padding=(0, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1x3")
        self.conv3x1 = ConvBlock(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=(3, 1),
            strides=1,
            padding=(1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3x1")

    def call(self, x, training=None):
        x = self.conv_list(x, training=training)
        y1 = self.conv1x3(x, training=training)
        y2 = self.conv3x1(x, training=training)
        # Concatenate along the channel axis for the active data format.
        x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        return x
class InceptionAUnit(nn.Layer):
    """
    InceptionV3 type Inception-A unit.

    Four parallel branches (1x1, 5x5 via 1x1->5x5, double 3x3, avg-pool)
    concatenated by `Concurrent`. The first three branches contribute a
    fixed 64 + 64 + 96 = 224 channels, so the pooling branch supplies the
    remaining `out_channels - 224`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        assert (out_channels > 224)
        # Remaining channels after the three fixed-width conv branches.
        pool_out_channels = out_channels - 224

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=64,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(48, 64),
            kernel_size_list=(1, 5),
            strides_list=(1, 1),
            padding_list=(0, 2),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96, 96),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=pool_out_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class ReductionAUnit(nn.Layer):
    """
    InceptionV3 type Reduction-A unit.

    Halves the spatial resolution via three parallel stride-2 branches
    (3x3 conv, double-3x3 conv stack, max-pool), concatenated:
    384 + 96 + 288(pass-through) = 768 output channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        # This unit is only used at the fixed 288 -> 768 transition.
        assert (in_channels == 288)
        assert (out_channels == 768)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96, 96),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class InceptionBUnit(nn.Layer):
    """
    InceptionV3 type Inception-B unit.

    Four parallel branches using factorized 7x7 convolutions (1x7 and 7x1
    pairs); each branch ends with 192 channels, 4 * 192 = 768 total.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of output channels in the 7x7 branches.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        # Fixed 768 -> 768 body unit of the middle stage.
        assert (in_channels == 768)
        assert (out_channels == 768)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=192,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(mid_channels, mid_channels, 192),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(mid_channels, mid_channels, mid_channels, mid_channels, 192),
            kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
            strides_list=(1, 1, 1, 1, 1),
            padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=192,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class ReductionBUnit(nn.Layer):
    """
    InceptionV3 type Reduction-B unit.

    Halves the spatial resolution via three parallel stride-2 branches:
    320 + 192 + 768(pass-through) = 1280 output channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        # This unit is only used at the fixed 768 -> 1280 transition.
        assert (in_channels == 768)
        assert (out_channels == 1280)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 320),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192, 192, 192),
            kernel_size_list=(1, (1, 7), (7, 1), 3),
            strides_list=(1, 1, 1, 2),
            padding_list=(0, (0, 3), (3, 0), 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class InceptionCUnit(nn.Layer):
    """
    InceptionV3 type Inception-C unit.

    Four parallel branches; the two ConvSeq3x3Branch branches each double
    their 384 stack channels via the 1x3/3x1 split, giving
    320 + 768 + 768 + 192 = 2048 output channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        assert (out_channels == 2048)

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=320,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(1,),
            strides_list=(1,),
            padding_list=(0,),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels_list=(448, 384),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=192,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class InceptInitBlock(nn.Layer):
    """
    InceptionV3 specific initial block.

    The stem: three 3x3 convs, max-pool, 1x1 and 3x3 convs, max-pool.
    For a 299x299 input this reduces the map to 192 channels at 35x35.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        # The stem's channel widths are hard-coded; the final conv emits 192.
        assert (out_channels == 192)

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            strides=1,
            padding=1,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3")
        self.pool1 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool1")
        self.conv4 = conv1x1_block(
            in_channels=64,
            out_channels=80,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv4")
        self.conv5 = conv3x3_block(
            in_channels=80,
            out_channels=192,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv5")
        self.pool2 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool2")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.pool1(x)
        x = self.conv4(x, training=training)
        x = self.conv5(x, training=training)
        x = self.pool2(x)
        return x
class InceptionV3(tf.keras.Model):
    """
    InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,'
    https://arxiv.org/abs/1512.00567.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    b_mid_channels : list of int
        Number of middle channels for each Inception-B unit.
    dropout_rate : float, default 0.5
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 b_mid_channels,
                 dropout_rate=0.5,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = SimpleSequential(name="features")
        self.features.add(InceptInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Every stage except the first starts with a reduction unit
                # that halves the spatial resolution.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                else:
                    unit = normal_units[i]
                if unit == InceptionBUnit:
                    # j - 1: unit 0 of this stage is the reduction unit, so
                    # Inception-B units start at j == 1.
                    stage.add(unit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        mid_channels=b_mid_channels[j - 1],
                        bn_eps=bn_eps,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    stage.add(unit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bn_eps=bn_eps,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        # Bug fix: propagate `training` so the classifier-head dropout is only
        # active during training (previously the flag was dropped here).
        x = self.output1(x, training=training)
        return x
def get_inceptionv3(model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create InceptionV3 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed InceptionV3 architecture configuration.
    net = InceptionV3(
        channels=[[256, 288, 288],
                  [768, 768, 768, 768, 768],
                  [1280, 2048, 2048]],
        init_block_channels=192,
        b_mid_channels=[128, 160, 160, 192],
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a concrete input shape so the weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def inceptionv3(**kwargs):
    """
    InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,'
    https://arxiv.org/abs/1512.00567.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_inceptionv3(model_name="inceptionv3", bn_eps=1e-3, **kwargs)
    return net
def _test():
    """Smoke test: build each model, run a forward pass, check weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model in [inceptionv3]:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 299, 299))
        else:
            x = tf.random.normal((batch, 299, 299, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionv3 or weight_count == 23834568)
# Script entry point: run the module smoke test when executed directly.
if __name__ == "__main__":
    _test()
| 26,989 | 31.715152 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fdmobilenet.py | """
FD-MobileNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
"""
__all__ = ['fdmobilenet_w1', 'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4', 'get_fdmobilenet']
import os
import tensorflow as tf
from .mobilenet import MobileNet
def get_fdmobilenet(width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create FD-MobileNet model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Base (width_scale == 1.0) per-stage channel configuration.
    channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]

    # FD-MobileNet is a MobileNet with a fast-downsampling first stage.
    net = MobileNet(
        channels=channels,
        first_stage_stride=True,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a concrete input shape so the weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def fdmobilenet_w1(**kwargs):
    """
    FD-MobileNet 1.0x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_fdmobilenet(width_scale=1.0, model_name="fdmobilenet_w1", **kwargs)
    return net
def fdmobilenet_w3d4(**kwargs):
    """
    FD-MobileNet 0.75x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_fdmobilenet(width_scale=0.75, model_name="fdmobilenet_w3d4", **kwargs)
    return net
def fdmobilenet_wd2(**kwargs):
    """
    FD-MobileNet 0.5x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_fdmobilenet(width_scale=0.5, model_name="fdmobilenet_wd2", **kwargs)
    return net
def fdmobilenet_wd4(**kwargs):
    """
    FD-MobileNet 0.25x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
    https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_fdmobilenet(width_scale=0.25, model_name="fdmobilenet_wd4", **kwargs)
    return net
def _test():
    """Smoke test: build each width variant, run a forward pass, check weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Expected trainable-parameter counts per variant.
    expected_counts = {
        fdmobilenet_w1: 2901288,
        fdmobilenet_w3d4: 1833304,
        fdmobilenet_wd2: 993928,
        fdmobilenet_wd4: 383160,
    }

    for model in expected_counts:
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
# Script entry point: run the module smoke test when executed directly.
if __name__ == "__main__":
    _test()
| 4,966 | 31.677632 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/metrics/cls_metrics.py | """
Evaluation Metrics for Image Classification.
"""
import tensorflow as tf
from .metric import EvalMetric
__all__ = ['Top1Error', 'TopKError']
class Accuracy(EvalMetric):
    """
    Computes accuracy classification score.

    Thin adapter wrapping Keras' SparseCategoricalAccuracy behind the
    project's EvalMetric interface.

    Parameters:
    ----------
    axis : int, default 1
        The axis that represents classes
    name : str, default 'accuracy'
        Name of this metric instance for display.
    output_names : list of str, or None, default None
        Name of predictions that should be used when updating with update_dict.
        By default include all predictions.
    label_names : list of str, or None, default None
        Name of labels that should be used when updating with update_dict.
        By default include all labels.
    """
    def __init__(self,
                 axis=1,
                 name="accuracy",
                 output_names=None,
                 label_names=None):
        super(Accuracy, self).__init__(
            name,
            axis=axis,
            output_names=output_names,
            label_names=label_names,
            has_global_stats=True)
        self.axis = axis
        # The Keras metric keeps the running totals for us.
        self.base_acc = tf.keras.metrics.SparseCategoricalAccuracy(name="acc")

    def update(self, labels, preds):
        """
        Updates the internal evaluation result.

        Parameters:
        ----------
        labels : tensor
            The labels of the data with class indices as values, one per sample.
        preds : tensor
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        self.base_acc.update_state(labels, preds)

    def get(self):
        """
        Gets the current evaluation result.

        Returns:
        -------
        names : list of str
            Name of the metrics.
        values : list of float
            Value of the evaluations.
        """
        value = float(self.base_acc.result().numpy())
        return self.name, value
class TopKAccuracy(EvalMetric):
    """
    Computes top k predictions accuracy.

    Parameters:
    ----------
    top_k : int, default 1
        Whether targets are in top k predictions.
    name : str, default 'top_k_accuracy'
        Name of this metric instance for display.
    output_names : list of str, or None, default None
        Name of predictions that should be used when updating with update_dict.
        By default include all predictions.
    label_names : list of str, or None, default None
        Name of labels that should be used when updating with update_dict.
        By default include all labels.
    """
    def __init__(self,
                 top_k=1,
                 name="top_k_accuracy",
                 output_names=None,
                 label_names=None):
        super(TopKAccuracy, self).__init__(
            name,
            top_k=top_k,
            output_names=output_names,
            label_names=label_names,
            has_global_stats=True)
        self.top_k = top_k
        assert (self.top_k > 1), "Please use Accuracy if top_k is no more than 1"
        self.name += "_{:d}".format(self.top_k)
        # Bug fix: `k` was previously hardcoded to 5, so the metric silently
        # computed top-5 accuracy regardless of the requested `top_k`.
        self.base_acc = tf.keras.metrics.SparseTopKCategoricalAccuracy(
            k=self.top_k,
            name="topk_acc")

    def update(self, labels, preds):
        """
        Updates the internal evaluation result.

        Parameters:
        ----------
        labels : tensor
            The labels of the data.
        preds : tensor
            Predicted values.
        """
        self.base_acc.update_state(labels, preds)

    def get(self):
        """
        Gets the current evaluation result.

        Returns:
        -------
        names : list of str
            Name of the metrics.
        values : list of float
            Value of the evaluations.
        """
        return self.name, float(self.base_acc.result().numpy())
class Top1Error(Accuracy):
    """
    Computes top-1 error (inverted accuracy classification score).

    Parameters:
    ----------
    axis : int, default 1
        The axis that represents classes.
    name : str, default 'top_1_error'
        Name of this metric instance for display.
    output_names : list of str, or None, default None
        Name of predictions that should be used when updating with update_dict.
        By default include all predictions.
    label_names : list of str, or None, default None
        Name of labels that should be used when updating with update_dict.
        By default include all labels.
    """
    def __init__(self,
                 axis=1,
                 name="top_1_error",
                 output_names=None,
                 label_names=None):
        super(Top1Error, self).__init__(
            axis=axis,
            name=name,
            output_names=output_names,
            label_names=label_names)

    def get(self):
        """
        Gets the current evaluation result.

        Returns:
        -------
        names : list of str
            Name of the metrics.
        values : list of float
            Value of the evaluations.
        """
        # Error is the complement of the accumulated accuracy.
        accuracy = float(self.base_acc.result().numpy())
        return self.name, 1.0 - accuracy
class TopKError(TopKAccuracy):
    """
    Computes top-k error (inverted top k predictions accuracy).

    Parameters:
    ----------
    top_k : int
        Whether targets are out of top k predictions, default 1
    name : str, default 'top_k_error'
        Name of this metric instance for display.
    output_names : list of str, or None, default None
        Name of predictions that should be used when updating with update_dict.
        By default include all predictions.
    label_names : list of str, or None, default None
        Name of labels that should be used when updating with update_dict.
        By default include all labels.
    """
    def __init__(self,
                 top_k=1,
                 name="top_k_error",
                 output_names=None,
                 label_names=None):
        # Keep the caller's name: the parent constructor appends "_<k>" to
        # `self.name`, which we override with the "_k_" -> "_<k>_" template.
        name_ = name
        super(TopKError, self).__init__(
            top_k=top_k,
            name=name,
            output_names=output_names,
            label_names=label_names)
        self.name = name_.replace("_k_", "_{}_".format(top_k))

    def get(self):
        """
        Gets the current evaluation result.

        Returns:
        -------
        names : list of str
            Name of the metrics.
        values : list of float
            Value of the evaluations.
        """
        # Error is the complement of the accumulated top-k accuracy.
        accuracy = float(self.base_acc.result().numpy())
        return self.name, 1.0 - accuracy
| 6,552 | 29.621495 | 95 | py |
imgclsmob | imgclsmob-master/tensorflow2/metrics/det_metrics.py | """
Evaluation Metrics for Object Detection.
"""
import warnings
import numpy as np
import mxnet as mx
__all__ = ['CocoDetMApMetric']
class CocoDetMApMetric(mx.metric.EvalMetric):
"""
Detection metric for COCO bbox task.
Parameters:
----------
img_height : int
Processed image height.
coco_annotations_file_path : str
COCO anotation file path.
contiguous_id_to_json : list of int
Processed IDs.
validation_ids : bool, default False
Whether to use temporary file for estimation.
use_file : bool, default False
Whether to use temporary file for estimation.
score_thresh : float, default 0.05
Detection results with confident scores smaller than `score_thresh` will be discarded before saving to results.
data_shape : tuple of int, default is None
If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions.
This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must be
fixed for all validation images.
post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h)
If not None, the bounding boxes will be affine transformed rather than simply scaled.
name : str, default 'mAP'
Name of this metric instance for display.
"""
    def __init__(self,
                 img_height,
                 coco_annotations_file_path,
                 contiguous_id_to_json,
                 validation_ids=None,
                 use_file=False,
                 score_thresh=0.05,
                 data_shape=None,
                 post_affine=None,
                 name="mAP"):
        # NOTE(review): despite the class docstring, `validation_ids` is used
        # below as an optional collection of COCO image ids (default None),
        # not a bool -- confirm against callers.
        super(CocoDetMApMetric, self).__init__(name=name)
        self.img_height = img_height
        self.coco_annotations_file_path = coco_annotations_file_path
        self.contiguous_id_to_json = contiguous_id_to_json
        self.validation_ids = validation_ids
        self.use_file = use_file
        self.score_thresh = score_thresh

        # Cursor into self._img_ids and accumulated per-detection records.
        self.current_idx = 0
        self.coco_result = []

        # Validate/normalize data_shape: either a (height, width) pair or None.
        if isinstance(data_shape, (tuple, list)):
            assert len(data_shape) == 2, "Data shape must be (height, width)"
        elif not data_shape:
            data_shape = None
        else:
            raise ValueError("data_shape must be None or tuple of int as (height, width)")
        self._data_shape = data_shape

        # Affine post-transform only makes sense with a fixed data shape.
        if post_affine is not None:
            assert self._data_shape is not None, "Using post affine transform requires data_shape"
            self._post_affine = post_affine
        else:
            self._post_affine = None

        # Load ground truth once; image ids are sorted so that update order
        # matches the dataset iteration order.
        from pycocotools.coco import COCO
        self.gt = COCO(self.coco_annotations_file_path)
        self._img_ids = sorted(self.gt.getImgIds())
def reset(self):
self.current_idx = 0
self.coco_result = []
    def get(self):
        """
        Get evaluation metrics.

        Returns:
        -------
        name : str
            Name of this metric instance.
        stats : tuple of float
            First three COCOeval summary statistics (by COCOeval convention:
            AP@[.5:.95], AP@.5, AP@.75).
        """
        # Partial results are allowed but flagged.
        if self.current_idx != len(self._img_ids):
            warnings.warn("Recorded {} out of {} validation images, incomplete results".format(
                self.current_idx, len(self._img_ids)))

        from pycocotools.coco import COCO
        gt = COCO(self.coco_annotations_file_path)

        import tempfile
        import json
        # pycocotools' loadRes takes a file path, so dump the accumulated
        # detections to a temporary JSON file first.
        # NOTE(review): reopening a NamedTemporaryFile by name while it is
        # still open can fail on Windows -- confirm target platforms.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
            json.dump(self.coco_result, f)
            f.flush()
            pred = gt.loadRes(f.name)

        from pycocotools.cocoeval import COCOeval
        coco_eval = COCOeval(gt, pred, "bbox")
        if self.validation_ids is not None:
            # Restrict evaluation to an explicit subset of image ids.
            coco_eval.params.imgIds = self.validation_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        return self.name, tuple(coco_eval.stats[:3])
def update2(self,
pred_bboxes,
pred_labels,
pred_scores):
"""
Update internal buffer with latest predictions. Note that the statistics are not available until you call
self.get() to return the metrics.
Parameters:
----------
pred_bboxes : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes with shape `B, N, 4`.
Where B is the size of mini-batch, N is the number of bboxes.
pred_labels : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes labels with shape `B, N`.
pred_scores : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes scores with shape `B, N`.
"""
def as_numpy(a):
"""
Convert a (list of) mx.NDArray into numpy.ndarray
"""
if isinstance(a, (list, tuple)):
out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
return np.concatenate(out, axis=0)
elif isinstance(a, mx.nd.NDArray):
a = a.asnumpy()
return a
for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]):
valid_pred = np.where(pred_label.flat >= 0)[0]
pred_bbox = pred_bbox[valid_pred, :].astype(np.float)
pred_label = pred_label.flat[valid_pred].astype(int)
pred_score = pred_score.flat[valid_pred].astype(np.float)
imgid = self._img_ids[self.current_idx]
self.current_idx += 1
affine_mat = None
if self._data_shape is not None:
entry = self.gt.loadImgs(imgid)[0]
orig_height = entry["height"]
orig_width = entry["width"]
height_scale = float(orig_height) / self._data_shape[0]
width_scale = float(orig_width) / self._data_shape[1]
if self._post_affine is not None:
affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0])
else:
height_scale, width_scale = (1.0, 1.0)
# for each bbox detection in each image
for bbox, label, score in zip(pred_bbox, pred_label, pred_score):
if label not in self.contiguous_id_to_json:
# ignore non-exist class
continue
if score < self.score_thresh:
continue
category_id = self.contiguous_id_to_json[label]
# rescale bboxes/affine transform bboxes
if affine_mat is not None:
bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat)
bbox[2:4] = self.affine_transform(bbox[2:4], affine_mat)
else:
bbox[[0, 2]] *= width_scale
bbox[[1, 3]] *= height_scale
# convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]
bbox[2:4] -= (bbox[:2] - 1)
self.coco_result.append({"image_id": imgid,
"category_id": category_id,
"bbox": bbox[:4].tolist(),
"score": score})
def update(self, labels, preds):
det_bboxes = []
det_ids = []
det_scores = []
for x_rr, y in zip(preds, labels):
bboxes = x_rr.slice_axis(axis=-1, begin=0, end=4)
ids = x_rr.slice_axis(axis=-1, begin=4, end=5).squeeze(axis=2)
scores = x_rr.slice_axis(axis=-1, begin=5, end=6).squeeze(axis=2)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, self.img_height))
self.update2(det_bboxes, det_ids, det_scores)
@staticmethod
def affine_transform(pt, t):
"""
Apply affine transform to a bounding box given transform matrix t.
Parameters:
----------
pt : numpy.ndarray
Bounding box with shape (1, 2).
t : numpy.ndarray
Transformation matrix with shape (2, 3).
Returns:
-------
numpy.ndarray
New bounding box with shape (1, 2).
"""
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
| 8,392 | 38.219626 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/imagenet1k_cls_dataset.py | """
ImageNet-1K classification dataset.
"""
__all__ = ['ImageNet1KMetaInfo', 'load_image_imagenet1k_val']
import os
import math
import cv2
import numpy as np
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import keras_preprocessing as keras_prep
from .dataset_metainfo import DatasetMetaInfo
from .cls_dataset import img_normalization
class ImageNet1KMetaInfo(DatasetMetaInfo):
    """
    Descriptor of ImageNet-1K dataset.
    """
    def __init__(self):
        super(ImageNet1KMetaInfo, self).__init__()
        self.label = "ImageNet1K"
        self.short_label = "imagenet"
        self.root_dir_name = "imagenet"
        self.dataset_class = None
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = 1000
        self.input_image_size = (224, 224)
        # Inverted center-crop ratio: shorter side is resized to
        # input_size / resize_inv_factor before cropping (0.875 -> 256 for 224).
        self.resize_inv_factor = 0.875
        self.train_metric_capts = ["Train.Top1"]
        self.train_metric_names = ["Top1Error"]
        self.train_metric_extra_kwargs = [{"name": "err-top1"}]
        self.val_metric_capts = ["Val.Top1", "Val.Top5"]
        self.val_metric_names = ["Top1Error", "TopKError"]
        self.val_metric_extra_kwargs = [{"name": "err-top1"}, {"name": "err-top5", "top_k": 5}]
        self.saver_acc_ind = 1
        self.train_transform = imagenet_train_transform
        self.val_transform = imagenet_val_transform
        self.test_transform = imagenet_val_transform
        self.train_generator = imagenet_train_generator
        self.val_generator = imagenet_val_generator
        self.test_generator = imagenet_val_generator
        self.ml_type = "imgcls"
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        # interpolation_msg is the string passed to flow_from_directory; it may
        # additionally encode the crop ratio (see update()).
        self.interpolation = "bilinear"
        self.interpolation_msg = "bilinear"
    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for ImageNet-1K dataset metainfo).

        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(ImageNet1KMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            default=self.input_image_size[0],
            help="size of the input for model")
        parser.add_argument(
            "--resize-inv-factor",
            type=float,
            default=self.resize_inv_factor,
            help="inverted ratio for input image crop")
        parser.add_argument(
            "--mean-rgb",
            nargs=3,
            type=float,
            default=self.mean_rgb,
            help="Mean of RGB channels in the dataset")
        parser.add_argument(
            "--std-rgb",
            nargs=3,
            type=float,
            default=self.std_rgb,
            help="STD of RGB channels in the dataset")
        parser.add_argument(
            "--interpolation",
            type=str,
            default=self.interpolation,
            help="Preprocessing interpolation")
    def update(self,
               args):
        """
        Update ImageNet-1K dataset metainfo after user customizing.

        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(ImageNet1KMetaInfo, self).update(args)
        self.input_image_size = (args.input_size, args.input_size)
        self.mean_rgb = args.mean_rgb
        self.std_rgb = args.std_rgb
        self.interpolation = args.interpolation
        if self.interpolation == "nearest":
            self.interpolation_msg = self.interpolation
        else:
            # Encode the crop ratio into the interpolation string; it is
            # decoded later by load_image_imagenet1k_val (e.g. "bilinear:0.875").
            self.interpolation_msg = "{}:{}".format(self.interpolation, self.resize_inv_factor)
        import keras_preprocessing as keras_prep
        # Monkey-patch Keras' image loader so DirectoryIterator applies the
        # ImageNet-style resize + center-crop during validation.
        keras_prep.image.iterator.load_img = load_image_imagenet1k_val
def resize(img,
           size,
           interpolation):
    """
    Resize the input PIL Image to the given size via OpenCV.

    Parameters:
    ----------
    img : PIL.Image
        input image.
    size : int or tuple of (W, H)
        Size of output image. An int means: resize the shorter side to `size`,
        keeping the aspect ratio.
    interpolation : int
        PIL interpolation code (mapped onto the OpenCV equivalent).

    Returns:
    -------
    PIL.Image
        Resulted image.

    Raises:
    ------
    ValueError
        If the interpolation method is not supported.
    """
    # Map PIL interpolation codes onto their OpenCV equivalents.
    if interpolation == Image.NEAREST:
        cv_interpolation = cv2.INTER_NEAREST
    elif interpolation == Image.BILINEAR:
        cv_interpolation = cv2.INTER_LINEAR
    elif interpolation == Image.BICUBIC:
        cv_interpolation = cv2.INTER_CUBIC
    elif interpolation == Image.LANCZOS:
        cv_interpolation = cv2.INTER_LANCZOS4
    else:
        # BUGFIX: the original passed the format string and the value as two
        # separate ValueError arguments, so the message was never formatted.
        raise ValueError("Invalid interpolation method: {}".format(interpolation))
    if isinstance(size, int):
        w, h = img.size
        # Shorter side already matches the target -> nothing to do.
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        # Scale the shorter side to `size`, preserving the aspect ratio.
        if w < h:
            out_size = (size, int(size * h / w))
        else:
            out_size = (int(size * w / h), size)
    else:
        out_size = size
    cv_img = cv2.resize(np.array(img), dsize=out_size, interpolation=cv_interpolation)
    return Image.fromarray(cv_img)
def center_crop(img,
                output_size):
    """
    Crop the given PIL Image around its center.

    Parameters:
    ----------
    img : PIL.Image
        input image.
    output_size : tuple of (W, H) or int
        Size of output image; an int yields a square crop.

    Returns:
    -------
    PIL.Image
        Resulted image.
    """
    if isinstance(output_size, int):
        output_size = (int(output_size), int(output_size))
    width, height = img.size
    target_h, target_w = output_size
    top = int(round((height - target_h) / 2.))
    left = int(round((width - target_w) / 2.))
    return img.crop((left, top, left + target_w, top + target_h))
def load_image_imagenet1k_val(path,
                              grayscale=False,
                              color_mode="rgb",
                              target_size=None,
                              interpolation="nearest"):
    """
    Wraps keras_preprocessing.image.utils.load_img and apply center crop as in ImageNet-1K validation procedure.

    # Arguments
        path: Path to image file.
        grayscale: DEPRECATED, use `color_mode="grayscale"` (kept for Keras compatibility).
        color_mode: One of "grayscale", 'rgb', 'rgba'. Default: 'rgb'.
            The desired image format.
        target_size: Either `None` (default to original size)
            or tuple of ints `(img_height, img_width)`.
        interpolation: Interpolation and crop methods used to resample and crop the image
            if the target size is different from that of the loaded image.
            Methods are delimited by ":" where first part is interpolation and second is an inverted ratio for input
            image crop, e.g. 'lanczos:0.875'.
            Supported interpolation methods are 'nearest', 'bilinear', 'bicubic', 'lanczos',
            'box', 'hamming' By default, 'nearest' is used.

    # Returns:
        A PIL Image instance.

    # Raises
        ImportError: if PIL is not available.
        ValueError: if interpolation method is not supported.
    """
    # Split the combined "method:ratio" specification.
    interpolation, resize_inv_factor = interpolation.split(":") if ":" in interpolation else (interpolation, "none")
    if resize_inv_factor == "none":
        # No crop ratio requested -> plain Keras loading path.
        return keras_prep.image.utils.load_img(
            path=path,
            grayscale=grayscale,
            color_mode=color_mode,
            target_size=target_size,
            interpolation=interpolation)
    # Load at original size; resize + center-crop is performed manually below.
    img = keras_prep.image.utils.load_img(
        path=path,
        grayscale=grayscale,
        color_mode=color_mode,
        target_size=None,
        interpolation=interpolation)
    if (target_size is None) or (img.size == (target_size[1], target_size[0])):
        return img
    try:
        resize_inv_factor = float(resize_inv_factor)
    except ValueError:
        # BUGFIX: the original passed the format string and the value as two
        # separate ValueError arguments, so the message was never formatted.
        raise ValueError("Invalid crop inverted ratio: {}".format(resize_inv_factor))
    if interpolation not in keras_prep.image.utils._PIL_INTERPOLATION_METHODS:
        raise ValueError("Invalid interpolation method {} specified. Supported methods are {}".format(
            interpolation,
            ", ".join(keras_prep.image.utils._PIL_INTERPOLATION_METHODS.keys())))
    resample = keras_prep.image.utils._PIL_INTERPOLATION_METHODS[interpolation]
    # Resize the shorter side to target / ratio, then center-crop to target.
    resize_value = int(math.ceil(float(target_size[0]) / resize_inv_factor))
    img = resize(
        img=img,
        size=resize_value,
        interpolation=resample)
    return center_crop(
        img=img,
        output_size=target_size)
def imagenet_train_transform(ds_metainfo,
                             data_format="channels_last"):
    """
    Create image transform sequence for training subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def _normalize(img):
        # Per-channel mean/std normalization with the dataset statistics.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)

    return ImageDataGenerator(
        preprocessing_function=_normalize,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        data_format=data_format)
def imagenet_val_transform(ds_metainfo,
                           data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def _normalize(img):
        # Per-channel mean/std normalization with the dataset statistics.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)

    return ImageDataGenerator(
        preprocessing_function=_normalize,
        data_format=data_format)
def imagenet_train_generator(data_generator,
                             ds_metainfo,
                             batch_size):
    """
    Create image generator for training subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    subset_dir = os.path.join(ds_metainfo.root_dir_path, "train")
    return data_generator.flow_from_directory(
        directory=subset_dir,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg)
def imagenet_val_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    subset_dir = os.path.join(ds_metainfo.root_dir_path, "val")
    return data_generator.flow_from_directory(
        directory=subset_dir,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg)
| 11,999 | 30.496063 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/coco_hpe1_dataset.py | """
COCO keypoint detection (2D single human pose estimation) dataset.
"""
import os
import threading
import copy
import cv2
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe1Dataset(object):
    """
    COCO keypoint detection (2D single human pose estimation) dataset.
    Parameters:
    ----------
    root : string
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    splits : list of str, default ['person_keypoints_val2017']
        Json annotations name.
        Candidates can be: person_keypoints_val2017, person_keypoints_train2017.
    check_centers : bool, default is False
        If true, will force check centers of bbox and keypoints, respectively.
        If centers are far away from each other, remove this label.
    skip_empty : bool, default is False
        Whether skip entire image if no valid label is found. Use `False` if this dataset is
        for validation to avoid COCO metric error.
    """
    # Only the 'person' category is used for pose estimation.
    CLASSES = ["person"]
    # COCO joint index -> joint name.
    KEYPOINTS = {
        0: "nose",
        1: "left_eye",
        2: "right_eye",
        3: "left_ear",
        4: "right_ear",
        5: "left_shoulder",
        6: "right_shoulder",
        7: "left_elbow",
        8: "right_elbow",
        9: "left_wrist",
        10: "right_wrist",
        11: "left_hip",
        12: "right_hip",
        13: "left_knee",
        14: "right_knee",
        15: "left_ankle",
        16: "right_ankle"
    }
    # Pairs of (1-based) joint indices connected by limbs (COCO convention).
    SKELETON = [
        [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
        [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
    def __init__(self,
                 root,
                 mode="train",
                 transform=None,
                 splits=("person_keypoints_val2017",),
                 check_centers=False,
                 skip_empty=True):
        self._root = os.path.expanduser(root)
        self.mode = mode
        self.transform = transform
        self.num_class = len(self.CLASSES)
        if isinstance(splits, str):
            splits = [splits]
        self._splits = splits
        # One pycocotools COCO object per split (filled by _load_jsons).
        self._coco = []
        self._check_centers = check_centers
        self._skip_empty = skip_empty
        self.index_map = dict(zip(type(self).CLASSES, range(self.num_class)))
        self.json_id_to_contiguous = None
        self.contiguous_id_to_json = None
        self._items, self._labels = self._load_jsons()
        # Path of the annotation file used by external evaluation code.
        mode_name = "train" if mode == "train" else "val"
        annotations_dir_path = os.path.join(root, "annotations")
        annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
        self.annotations_file_path = annotations_file_path
    def __str__(self):
        detail = ",".join([str(s) for s in self._splits])
        return self.__class__.__name__ + "(" + detail + ")"
    @property
    def classes(self):
        """
        Category names.
        """
        return type(self).CLASSES
    @property
    def num_joints(self):
        """
        Dataset defined: number of joints provided.
        """
        return 17
    @property
    def joint_pairs(self):
        """
        Joint pairs which defines the pairs of joint to be swapped
        when the image is flipped horizontally.
        """
        return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
    @property
    def coco(self):
        """
        Return pycocotools object for evaluation purposes.
        """
        if not self._coco:
            raise ValueError("No coco objects found, dataset not initialized.")
        if len(self._coco) > 1:
            raise NotImplementedError(
                "Currently we don't support evaluating {} JSON files".format(len(self._coco)))
        return self._coco[0]
    def __len__(self):
        return len(self._items)
    def __getitem__(self, idx):
        img_path = self._items[idx]
        # COCO file names are the zero-padded image id, e.g. 000000397133.jpg.
        img_id = int(os.path.splitext(os.path.basename(img_path))[0])
        label = copy.deepcopy(self._labels[idx])
        # img = mx.image.imread(img_path, 1)
        img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            img, scale, center, score = self.transform(img, label)
        # print("center={}".format(center))
        # print("scale={}".format(scale))
        # Label is packed flat as [img_id, score, center(2), scale(2)].
        res_label = np.array([float(img_id)] + [float(score)] + list(center) + list(scale), np.float32)
        return img, res_label
    def _load_jsons(self):
        """
        Load all image paths and labels from JSON annotation files into buffer.
        """
        items = []
        labels = []
        from pycocotools.coco import COCO
        for split in self._splits:
            anno = os.path.join(self._root, "annotations", split) + ".json"
            _coco = COCO(anno)
            self._coco.append(_coco)
            classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())]
            if not classes == self.classes:
                raise ValueError("Incompatible category names with COCO: ")
            assert classes == self.classes
            # Map the sparse COCO category ids onto contiguous indices and
            # make sure all splits agree on the mapping.
            json_id_to_contiguous = {
                v: k for k, v in enumerate(_coco.getCatIds())}
            if self.json_id_to_contiguous is None:
                self.json_id_to_contiguous = json_id_to_contiguous
                self.contiguous_id_to_json = {
                    v: k for k, v in self.json_id_to_contiguous.items()}
            else:
                assert self.json_id_to_contiguous == json_id_to_contiguous
            # iterate through the annotations
            image_ids = sorted(_coco.getImgIds())
            for entry in _coco.loadImgs(image_ids):
                dirname, filename = entry["coco_url"].split("/")[-2:]
                abs_path = os.path.join(self._root, dirname, filename)
                if not os.path.exists(abs_path):
                    raise IOError("Image: {} not exists.".format(abs_path))
                label = self._check_load_keypoints(_coco, entry)
                if not label:
                    continue
                # num of items are relative to person, not image
                for obj in label:
                    items.append(abs_path)
                    labels.append(obj)
        return items, labels
    def _check_load_keypoints(self, coco, entry):
        """
        Check and load ground-truth keypoints.
        """
        ann_ids = coco.getAnnIds(imgIds=entry["id"], iscrowd=False)
        objs = coco.loadAnns(ann_ids)
        # check valid bboxes
        valid_objs = []
        width = entry["width"]
        height = entry["height"]
        for obj in objs:
            contiguous_cid = self.json_id_to_contiguous[obj["category_id"]]
            if contiguous_cid >= self.num_class:
                # not class of interest
                continue
            if max(obj["keypoints"]) == 0:
                continue
            # convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound
            xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height)
            # require non-zero box area
            if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin:
                continue
            # joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility
            joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32)
            for i in range(self.num_joints):
                joints_3d[i, 0, 0] = obj["keypoints"][i * 3 + 0]
                joints_3d[i, 1, 0] = obj["keypoints"][i * 3 + 1]
                # joints_3d[i, 2, 0] = 0
                visible = min(1, obj["keypoints"][i * 3 + 2])
                joints_3d[i, :2, 1] = visible
                # joints_3d[i, 2, 1] = 0
            if np.sum(joints_3d[:, 0, 1]) < 1:
                # no visible keypoint
                continue
            if self._check_centers:
                # Heuristic filter: drop boxes whose keypoint centroid is far
                # from the box center relative to the box area.
                # NOTE(review): constants (num_vis/80 + 47/80) look inherited
                # from upstream pose code -- confirm before tuning.
                bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax))
                kp_center, num_vis = self._get_keypoints_center_count(joints_3d)
                ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area)
                if (num_vis / 80.0 + 47 / 80.0) > ks:
                    continue
            valid_objs.append({
                "bbox": (xmin, ymin, xmax, ymax),
                "joints_3d": joints_3d
            })
        if not valid_objs:
            if not self._skip_empty:
                # dummy invalid labels if no valid objects are found
                valid_objs.append({
                    "bbox": np.array([-1, -1, 0, 0]),
                    "joints_3d": np.zeros((self.num_joints, 3, 2), dtype=np.float32)
                })
        return valid_objs
    @staticmethod
    def _get_box_center_area(bbox):
        """
        Get bbox center.
        """
        c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])
        area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
        return c, area
    @staticmethod
    def _get_keypoints_center_count(keypoints):
        """
        Get geometric center of all keypoints.
        """
        keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))
        keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))
        num = float(np.sum(keypoints[:, 0, 1]))
        return np.array([keypoint_x / num, keypoint_y / num]), num
    @staticmethod
    def bbox_clip_xyxy(xyxy, width, height):
        """
        Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary.
        All bounding boxes will be clipped to the new region `(0, 0, width, height)`.

        Parameters:
        ----------
        xyxy : list, tuple or numpy.ndarray
            The bbox in format (xmin, ymin, xmax, ymax).
            If numpy.ndarray is provided, we expect multiple bounding boxes with
            shape `(N, 4)`.
        width : int or float
            Boundary width.
        height : int or float
            Boundary height.

        Returns:
        -------
        tuple or np.array
            Description of returned object.
        """
        if isinstance(xyxy, (tuple, list)):
            if not len(xyxy) == 4:
                raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy)))
            x1 = np.minimum(width - 1, np.maximum(0, xyxy[0]))
            y1 = np.minimum(height - 1, np.maximum(0, xyxy[1]))
            x2 = np.minimum(width - 1, np.maximum(0, xyxy[2]))
            y2 = np.minimum(height - 1, np.maximum(0, xyxy[3]))
            return x1, y1, x2, y2
        elif isinstance(xyxy, np.ndarray):
            if not xyxy.size % 4 == 0:
                raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape))
            x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0]))
            y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1]))
            x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2]))
            y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3]))
            return np.hstack((x1, y1, x2, y2))
        else:
            raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy)))
    @staticmethod
    def bbox_xywh_to_xyxy(xywh):
        """
        Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax)

        Parameters:
        ----------
        xywh : list, tuple or numpy.ndarray
            The bbox in format (x, y, w, h).
            If numpy.ndarray is provided, we expect multiple bounding boxes with
            shape `(N, 4)`.

        Returns:
        -------
        tuple or np.ndarray
            The converted bboxes in format (xmin, ymin, xmax, ymax).
            If input is numpy.ndarray, return is numpy.ndarray correspondingly.
        """
        if isinstance(xywh, (tuple, list)):
            if not len(xywh) == 4:
                raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh)))
            # w/h are decremented by 1 (inclusive pixel-coordinate convention).
            w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0)
            return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h
        elif isinstance(xywh, np.ndarray):
            if not xywh.size % 4 == 0:
                raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape))
            xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
            return xyxy
        else:
            raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh)))
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform1(object):
    """
    Validation transform (simple-baseline style): crop the person box with an
    affine warp to the network input size and normalize the crop.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo providing input size and normalization statistics.
    """
    def __init__(self,
                 ds_metainfo):
        self.ds_metainfo = ds_metainfo
        self.image_size = self.ds_metainfo.input_image_size
        in_h = self.image_size[0]
        in_w = self.image_size[1]
        self.aspect_ratio = float(in_w / in_h)
        self.mean = ds_metainfo.mean_rgb
        self.std = ds_metainfo.std_rgb

    def __call__(self, src, label):
        bbox = label["bbox"]
        assert len(bbox) == 4
        xmin, ymin, xmax, ymax = bbox
        center, scale = _box_to_center_scale(xmin, ymin, xmax - xmin, ymax - ymin, self.aspect_ratio)
        score = label.get("score", 1)
        out_h, out_w = self.image_size
        # Warp the (center, scale) region onto the network input resolution.
        trans = get_affine_transform(center, scale, 0, [out_w, out_h])
        warped = cv2.warpAffine(src, trans, (int(out_w), int(out_h)), flags=cv2.INTER_LINEAR)
        # Scale to [0, 1] and apply per-channel mean/std normalization.
        warped = warped.astype(np.float32) / 255.0
        warped = (warped - np.array(self.mean, np.float32)) / np.array(self.std, np.float32)
        return warped, scale, center, score
def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25):
pixel_std = 1
center = np.zeros((2,), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)
if center[0] != -1:
scale = scale * scale_mult
return center, scale
def get_dir(src_point, rot_rad):
    """
    Rotate a 2D point by `rot_rad` radians around the origin.

    Returns:
    -------
    list of 2 float
        Rotated (x, y) coordinates.
    """
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    return [src_point[0] * cos_r - src_point[1] * sin_r,
            src_point[0] * sin_r + src_point[1] * cos_r]
def crop(img, center, scale, output_size, rot=0):
    """
    Warp-crop `img` around `center` at `scale` into `output_size` (W, H),
    optionally rotating by `rot` degrees.
    """
    trans = get_affine_transform(center, scale, rot, output_size)
    return cv2.warpAffine(
        img,
        trans,
        (int(output_size[0]), int(output_size[1])),
        flags=cv2.INTER_LINEAR)
def get_3rd_point(a, b):
    """
    Return the third vertex of the right triangle defined by `a` and `b`:
    `b` plus the 90-degree rotation of the b->a direction.
    """
    delta = a - b
    return b + np.array([-delta[1], delta[0]], dtype=np.float32)
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    """
    Build the 2x3 affine matrix that maps a (center, scale, rot) source region
    onto an output_size (W, H) target, or the inverse mapping when inv != 0.

    Parameters:
    ----------
    center : np.ndarray
        Center of the source region, (x, y).
    scale : float, list or np.ndarray
        Source region size; a scalar is expanded to (scale, scale).
    rot : float
        Rotation in degrees.
    output_size : tuple/list of 2 int
        Target size as (width, height).
    shift : np.ndarray, default (0, 0)
        Relative shift of the source center, in units of `scale`.
        NOTE: mutable default argument, but it is never mutated here.
    inv : int, default 0
        When non-zero, return the target->source transform instead.

    Returns:
    -------
    np.ndarray
        2x3 affine transformation matrix for cv2.warpAffine.
    """
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale])
    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    # Direction vectors give a second pair of corresponding points.
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    # A third, perpendicular point completes the triangle that
    # cv2.getAffineTransform needs to solve for the matrix.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform2(object):
    """
    Validation transform (AlphaPose style): crop the person box via the
    AlphaPose box cropper and return the crop together with the scaled box
    corners needed to invert the mapping later.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo providing input size and normalization statistics.
    """
    def __init__(self,
                 ds_metainfo):
        # print("ds_metainfo.mean_rgb={}".format(ds_metainfo.mean_rgb))
        # print("ds_metainfo.std_rgb={}".format(ds_metainfo.std_rgb))
        self.ds_metainfo = ds_metainfo
        self.image_size = self.ds_metainfo.input_image_size
        height = self.image_size[0]
        width = self.image_size[1]
        self.aspect_ratio = float(width / height)
        self.mean = ds_metainfo.mean_rgb
        self.std = ds_metainfo.std_rgb
    def __call__(self, src, label):
        # print(src.shape)
        bbox = label["bbox"]
        assert len(bbox) == 4
        score = label.get('score', 1)
        # Wrap the single ground-truth box as a one-detection batch with
        # class 0 / score 1 so it passes the detection filter.
        img, scale_box = detector_to_alpha_pose(
            src,
            class_ids=np.array([[0.]]),
            scores=np.array([[1.]]),
            bounding_boxs=np.array(np.array([bbox])),
            output_shape=self.image_size)
        # scale_box is normally (1, 4); the flat-(4,) branch below handles a
        # degenerate shape.
        # NOTE(review): the flat branch indexes with tuples ((0, 1)), which
        # picks a single element of a 1-D array -- confirm it is reachable.
        if scale_box.shape[0] == 1:
            pt1 = np.array(scale_box[0, (0, 1)], dtype=np.float32)
            pt2 = np.array(scale_box[0, (2, 3)], dtype=np.float32)
        else:
            assert scale_box.shape[0] == 4
            pt1 = np.array(scale_box[(0, 1)], dtype=np.float32)
            pt2 = np.array(scale_box[(2, 3)], dtype=np.float32)
        # Drop the batch axis and convert CHW -> HWC for the model input.
        res_img = img[0].astype(np.float32)
        res_img = res_img.transpose((1, 2, 0))
        return res_img, pt1, pt2, score
def detector_to_alpha_pose(img,
                           class_ids,
                           scores,
                           bounding_boxs,
                           output_shape=(256, 192),
                           thr=0.5):
    """
    Convert raw person-detector outputs into AlphaPose network inputs.

    Filters detections by class/score, then crops each surviving box from the
    image at the requested output shape.

    Returns:
    -------
    tuple
        (pose inputs, upscaled crop boxes), both None-propagating when no
        detection survives the filter.
    """
    picked_boxes, _ = alpha_pose_detection_processor(
        img=img,
        boxes=bounding_boxs,
        class_idxs=class_ids,
        scores=scores,
        thr=thr)
    pose_input, upscale_bbox = alpha_pose_image_cropper(
        source_img=img,
        boxes=picked_boxes,
        output_shape=output_shape)
    return pose_input, upscale_bbox
def alpha_pose_detection_processor(img,
                                   boxes,
                                   class_idxs,
                                   scores,
                                   thr=0.5):
    """
    Keep only confident person detections and clip their boxes to the image.

    Returns (boxes, scores) for detections with class id 0 AND score > thr,
    or (None, None) when nothing passes. Box clipping happens in place.
    """
    # Drop a leading batch axis if present.
    if len(boxes.shape) == 3:
        boxes = boxes.squeeze(axis=0)
    if len(class_idxs.shape) == 3:
        class_idxs = class_idxs.squeeze(axis=0)
    if len(scores.shape) == 3:
        scores = scores.squeeze(axis=0)
    # Clip coordinates to the image bounds (in place).
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0., img.shape[1] - 1)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0., img.shape[0] - 1)
    # A detection passes only when both masks fire (person class AND confident).
    is_person = (class_idxs == 0).astype(np.int32)
    is_confident = (scores > thr).astype(np.int32)
    keep = np.where((is_person + is_confident) > 1)[0]
    if keep.shape[0] == 0:
        return None, None
    return boxes[keep], scores[keep]
def alpha_pose_image_cropper(source_img,
                             boxes,
                             output_shape=(256, 192)):
    """
    Crop one normalized CHW tensor per detection box for AlphaPose.

    Parameters:
    ----------
    source_img : np.ndarray
        Source image, HWC uint8.
    boxes : np.ndarray or None
        Detection boxes (N, 4) as (xmin, ymin, xmax, ymax); None passes through.
    output_shape : tuple of 2 int, default (256, 192)
        Crop size as (height, width).

    Returns:
    -------
    tuple of (np.ndarray, np.ndarray)
        Crops of shape (N, 3, H, W) and the enlarged boxes actually cropped.
    """
    if boxes is None:
        return None, boxes
    # crop person poses
    img_width, img_height = source_img.shape[1], source_img.shape[0]
    tensors = np.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]])
    out_boxes = np.zeros([boxes.shape[0], 4])
    for i, box in enumerate(boxes):
        img = source_img.copy()
        box_width = box[2] - box[0]
        box_height = box[3] - box[1]
        # Enlarge small boxes more aggressively than large ones.
        if box_width > 100:
            scale_rate = 0.2
        else:
            scale_rate = 0.3
        # crop image
        left = int(max(0, box[0] - box_width * scale_rate / 2))
        up = int(max(0, box[1] - box_height * scale_rate / 2))
        right = int(min(img_width - 1, max(left + 5, box[2] + box_width * scale_rate / 2)))
        bottom = int(min(img_height - 1, max(up + 5, box[3] + box_height * scale_rate / 2)))
        crop_width = right - left
        if crop_width < 1:
            continue
        crop_height = bottom - up
        if crop_height < 1:
            continue
        ul = np.array((left, up))
        br = np.array((right, bottom))
        # cv_cropBox also zeroes out everything outside the box (in place on
        # the per-iteration copy of the image).
        img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1])
        img = img.astype(np.float32)
        img = img / 255.0
        img = img.transpose((2, 0, 1))
        # img = mx.nd.image.to_tensor(np.array(img))
        # img = img.transpose((2, 0, 1))
        # NOTE(review): these look like per-channel mean offsets; the values
        # (0.406, 0.457, 0.480) suggest BGR-ordered ImageNet means and no std
        # division -- confirm against the AlphaPose reference implementation.
        img[0] = img[0] - 0.406
        img[1] = img[1] - 0.457
        img[2] = img[2] - 0.480
        assert (img.shape[0] == 3)
        tensors[i] = img
        out_boxes[i] = (left, up, right, bottom)
    return tensors, out_boxes
def cv_cropBox(img, ul, br, resH, resW, pad_val=0):
    """
    Crop the (ul, br) box out of `img` into a (resH, resW) image, padding the
    shorter dimension so the box keeps the target aspect ratio.

    Parameters:
    ----------
    img : np.ndarray
        Source image (HWC); pixels outside the box are overwritten in place.
    ul : np.ndarray
        Upper-left corner (x, y) of the box.
    br : np.ndarray
        Bottom-right corner (x, y) of the box (exclusive; decremented below).
    resH : int
        Output height.
    resW : int
        Output width.
    pad_val : int, default 0
        Value written outside the box before warping.

    Returns:
    -------
    np.ndarray
        Cropped and resized image of shape (resH, resW, C).
    """
    ul = ul
    br = (br - 1)
    # br = br.int()
    # Padded source extent that matches the resH:resW aspect ratio.
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.ndim == 2:
        # NOTE(review): this inserts the new axis in the middle
        # (img[:, np.newaxis] gives H x 1 x W, not H x W x 1) -- confirm the
        # grayscale path is actually exercised.
        img = img[:, np.newaxis]
    box_shape = [br[1] - ul[1], br[0] - ul[0]]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Padding Zeros
    # Zero everything outside the box so the warp samples only box content.
    img[:ul[1], :, :], img[:, :ul[0], :] = pad_val, pad_val
    img[br[1] + 1:, :, :], img[:, br[0] + 1:, :] = pad_val, pad_val
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
    src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
    # Third point completes the triangle needed by cv2.getAffineTransform.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR)
    return dst_img
# ---------------------------------------------------------------------------------------------------------------------
def recalc_pose1(keypoints,
                 bbs,
                 image_size):
    """
    Map heatmap-space keypoints back to original-image coordinates using the
    inverse of the (center, scale) affine crop transform.

    Parameters:
    ----------
    keypoints : np.ndarray
        Keypoints, shape (batch, num_joints, >=2); only x/y are remapped.
    bbs : np.ndarray
        Per-sample [center_x, center_y, scale_x, scale_y], shape (batch, 4).
    image_size : tuple of int
        Network input size (height, width); the heatmap is 4x smaller.

    Returns:
    -------
    np.ndarray
        Remapped keypoints, same shape as input.
    """
    def _affine(pt, t):
        vec = np.array([pt[0], pt[1], 1.]).T
        return np.dot(t, vec)[:2]

    def _map_back(coords, center, scale, output_size):
        out = np.zeros(coords.shape)
        inv_trans = get_affine_transform(center, scale, 0, output_size, inv=1)
        for j in range(coords.shape[0]):
            out[j, 0:2] = _affine(coords[j, 0:2], inv_trans)
        return out

    centers = bbs[:, :2]
    scales = bbs[:, 2:4]
    hm_h = image_size[0] // 4
    hm_w = image_size[1] // 4
    result = np.zeros_like(keypoints)
    for i in range(keypoints.shape[0]):
        result[i] = _map_back(keypoints[i], centers[i], scales[i], [hm_w, hm_h])
    return result
def recalc_pose1b(pred,
                  label,
                  image_size,
                  visible_conf_threshold=0.0):
    """
    Post-process simple-baseline predictions: remap keypoints to image space
    and compute a per-person confidence score.

    Parameters:
    ----------
    pred : np.ndarray
        Predicted keypoints with confidences, shape (batch, num_joints, 3);
        x/y are overwritten in place with remapped coordinates.
    label : np.ndarray
        Packed labels: [img_id, det_score, center(2), scale(2), ...].
    image_size : tuple of int
        Network input size (height, width).
    visible_conf_threshold : float, default 0.0
        Joint confidences above this value contribute to the person score.

    Returns:
    -------
    tuple
        (pred with remapped coordinates, per-person scores, image ids).
    """
    img_ids = label[:, 0].astype(np.int32)
    det_scores = label[:, 1]
    bbs = label[:, 2:6]
    keypoints = pred[:, :, :2]
    joint_conf = pred[:, :, 2]
    pred[:, :, :2] = recalc_pose1(keypoints, bbs, image_size)
    person_scores = []
    num_joints = keypoints.shape[1]
    for idx in range(keypoints.shape[0]):
        # Person score = mean of confident joint scores, scaled by the
        # detection score.
        confident = [float(joint_conf[idx][i]) for i in range(num_joints)
                     if float(joint_conf[idx][i]) > visible_conf_threshold]
        kpt_score = (sum(confident) / len(confident)) if confident else 0
        person_scores.append(kpt_score * float(det_scores[idx]))
    return pred, person_scores, img_ids
def recalc_pose2(keypoints,
                 bbs,
                 image_size):
    """
    Map predicted keypoints from heatmap space back to original image space
    (AlphaPose-style inverse box transform).

    Parameters:
    ----------
    keypoints : np.array
        Predicted keypoints in heatmap coordinates, shape (batch, joints, 2).
    bbs : np.array
        Boxes as (br_x, br_y, ul_x, ul_y), shape (batch, 4).
    image_size : tuple of 2 int
        Network input size (height, width); heatmaps are assumed 4x smaller.

    Returns:
    -------
    np.array
        Keypoints in original image coordinates, same shape as `keypoints`.
    """
    def box_invert(pt, ul, br, res_h, res_w):
        # Center of the (inclusive) box.
        cx = (br[0] - 1 - ul[0]) / 2
        cy = (br[1] - 1 - ul[1]) / 2
        # Effective crop extent, preserving the heatmap aspect ratio.
        len_h = max(br[1] - ul[1], (br[0] - ul[0]) * res_h / res_w)
        len_w = len_h * res_w / res_h
        scaled = (pt * len_h) / res_h
        # Undo the centering padding applied when the crop was letterboxed.
        if (len_w - 1) / 2 - cx > 0:
            scaled[0] = scaled[0] - ((len_w - 1) / 2 - cx)
        if (len_h - 1) / 2 - cy > 0:
            scaled[1] = scaled[1] - ((len_h - 1) / 2 - cy)
        return np.array([scaled[0] + ul[0], scaled[1] + ul[1]])
    br_pts = bbs[:, :2]
    ul_pts = bbs[:, 2:4]
    hm_h = image_size[0] // 4
    hm_w = image_size[1] // 4
    out = np.zeros_like(keypoints)
    for i in range(keypoints.shape[0]):
        for j in range(keypoints.shape[1]):
            out[i, j] = box_invert(keypoints[i, j], ul_pts[i], br_pts[i], hm_h, hm_w)
    return out
def recalc_pose2b(pred,
                  label,
                  image_size,
                  visible_conf_threshold=0.0):
    """
    Post-process AlphaPose predictions: remap keypoints into image space
    (in place) and compute a per-person confidence score.

    Parameters:
    ----------
    pred : np.array
        Predictions, shape (batch, joints, 3) as (x, y, confidence).
    label : np.array
        Per-person meta: (image_id, bbox_score, bbox[4]), shape (batch, 6).
    image_size : tuple of 2 int
        Network input size (height, width).
    visible_conf_threshold : float, default 0.0
        Minimal joint confidence counted as visible.

    Returns:
    -------
    tuple
        (pred with remapped coordinates, per-person scores, image ids).
    """
    img_ids = label[:, 0].astype(np.int32)
    bb_scores = label[:, 1]
    bbs = label[:, 2:6]
    kpts = pred[:, :, :2]
    kpt_conf = pred[:, :, 2]
    # Overwrite the coordinate channels with image-space coordinates.
    pred[:, :, :2] = recalc_pose2(kpts, bbs, image_size)
    person_scores = []
    for idx in range(kpts.shape[0]):
        # Average confidence over visible joints, times the box score.
        visible = [float(v) for v in kpt_conf[idx] if float(v) > visible_conf_threshold]
        score = (sum(visible) / len(visible)) if visible else 0
        person_scores.append(score * float(bb_scores[idx]))
    return pred, person_scores, img_ids
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe1MetaInfo(DatasetMetaInfo):
    """
    Metainfo for the COCO keypoint (single-person HPE) dataset pipeline.
    Supports two post-processing modes selected by `model_type`:
    1=SimplePose, 2=AlphaPose.
    """
    def __init__(self):
        super(CocoHpe1MetaInfo, self).__init__()
        self.label = "COCO"
        self.short_label = "coco"
        self.root_dir_name = "coco"
        self.dataset_class = CocoHpe1Dataset
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = CocoHpe1Dataset.classes
        self.input_image_size = (256, 192)
        self.train_metric_capts = None
        self.train_metric_names = None
        self.train_metric_extra_kwargs = None
        self.val_metric_capts = None
        self.val_metric_names = None
        self.test_metric_capts = ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        # annotations path is filled in later by update_from_dataset().
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose1b(x, y, self.input_image_size)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.test_transform = cocohpe_val_transform
        self.test_transform2 = CocoHpeValTransform1
        self.test_generator = cocohpe_test_generator
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        # 1 = SimplePose post-processing, 2 = AlphaPose post-processing.
        self.model_type = 1
    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for COCO keypoint dataset metainfo).
        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe1MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--mean-rgb",
            nargs=3,
            type=float,
            default=self.mean_rgb,
            help="Mean of RGB channels in the dataset")
        parser.add_argument(
            "--std-rgb",
            nargs=3,
            type=float,
            default=self.std_rgb,
            help="STD of RGB channels in the dataset")
        parser.add_argument(
            "--model-type",
            type=int,
            default=self.model_type,
            help="model type (1=SimplePose, 2=AlphaPose)")
    def update(self,
               args):
        """
        Update COCO keypoint dataset metainfo after user customizing.
        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe1MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.mean_rgb = args.mean_rgb
        self.std_rgb = args.std_rgb
        self.model_type = args.model_type
        # Select the transform/post-processing pair matching the model type.
        # NOTE(review): val_transform2 is only assigned here, not in __init__ —
        # confirm update() is always called before validation runs.
        if self.model_type == 1:
            self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
                lambda x, y: recalc_pose1b(x, y, self.input_image_size)
            self.val_transform2 = CocoHpeValTransform1
            self.test_transform2 = CocoHpeValTransform1
        else:
            self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
                lambda x, y: recalc_pose2b(x, y, self.input_image_size)
            self.val_transform2 = CocoHpeValTransform2
            self.test_transform2 = CocoHpeValTransform2
    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.
        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeDirectoryIterator(DirectoryIterator):
    """
    Keras DirectoryIterator that yields (image, label) pairs from a wrapped
    COCO HPE dataset instead of scanning a directory for class subfolders.
    NOTE: `DirectoryIterator.__init__` is deliberately not called (it would
    scan `directory`); only `set_processing_attrs` is used and the iterator
    state is initialized manually.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        super(CocoHpeDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # Number of samples comes from the wrapped dataset.
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns:
            A batch of transformed samples.
        """
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                # Allocated lazily: sample shapes are only known after the
                # first dataset access.
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y
class CocoHpeImageDataGenerator(ImageDataGenerator):
    """
    Keras image data generator whose `flow_from_directory` yields samples from
    a wrapped COCO HPE dataset instead of scanning class subdirectories.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        """
        Build a CocoHpeDirectoryIterator over `dataset`; `directory` and the
        class-related arguments exist only for interface compatibility.
        """
        iterator_kwargs = {
            "target_size": target_size,
            "color_mode": color_mode,
            "classes": classes,
            "class_mode": class_mode,
            "data_format": self.data_format,
            "batch_size": batch_size,
            "shuffle": shuffle,
            "seed": seed,
            "save_to_dir": save_to_dir,
            "save_prefix": save_prefix,
            "save_format": save_format,
            "follow_links": follow_links,
            "subset": subset,
            "interpolation": interpolation,
            "dataset": dataset,
        }
        return CocoHpeDirectoryIterator(directory, self, **iterator_kwargs)
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create the image data generator used for the validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageDataGenerator
        Image data generator.
    """
    def preprocess(img):
        # Instantiate the transform declared in the metainfo and apply it.
        return ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)
    return CocoHpeImageDataGenerator(
        preprocessing_function=preprocess,
        data_format=data_format)
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create an image generator for the validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Iterator
        Batch iterator over the validation subset.
    """
    # The dataset instance provides the actual samples; the directory argument
    # is only kept for interface compatibility with Keras.
    val_dataset = ds_metainfo.dataset_class(
        root=ds_metainfo.root_dir_path,
        mode="val",
        transform=ds_metainfo.val_transform2(
            ds_metainfo=ds_metainfo))
    val_dir = os.path.join(ds_metainfo.root_dir_path, "val")
    return data_generator.flow_from_directory(
        directory=val_dir,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=val_dataset)
def cocohpe_test_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create an image generator for the testing subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Iterator
        Batch iterator over the testing subset.
    """
    # Testing reuses the 'val' images on disk but the dataset is created in
    # 'test' mode with the test transform.
    test_dataset = ds_metainfo.dataset_class(
        root=ds_metainfo.root_dir_path,
        mode="test",
        transform=ds_metainfo.test_transform2(
            ds_metainfo=ds_metainfo))
    test_dir = os.path.join(ds_metainfo.root_dir_path, "val")
    return data_generator.flow_from_directory(
        directory=test_dir,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=test_dataset)
| 37,195 | 33.282028 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/seg_dataset.py | import random
import threading
import numpy as np
from PIL import Image, ImageOps, ImageFilter
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
class SegDataset(object):
    """
    Segmentation base dataset.
    Parameters:
    ----------
    root : str
        Path to data folder.
    mode : str
        'train', 'val', 'test', or 'demo'.
    transform : callable
        A function that transforms the image.
    base_size : int, default 520
        Base image size used for random scaling during training augmentation.
    crop_size : int, default 480
        Output crop size.
    """
    def __init__(self,
                 root,
                 mode,
                 transform,
                 base_size=520,
                 crop_size=480):
        super(SegDataset, self).__init__()
        assert (mode in ("train", "val", "test", "demo"))
        # NOTE(review): this second assert restricts this port to
        # inference-only modes; 'train'/'val' from the docstring are
        # effectively rejected — confirm this is intentional.
        assert (mode in ("test", "demo"))
        self.root = root
        self.mode = mode
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size
    def _val_sync_transform(self, image, mask):
        """
        Deterministic validation transform: resize short side to crop_size,
        then center-crop both image and mask.
        """
        outsize = self.crop_size
        short_size = outsize
        w, h = image.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        # Nearest-neighbor keeps mask labels intact.
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = image.size
        x1 = int(round(0.5 * (w - outsize)))
        y1 = int(round(0.5 * (h - outsize)))
        image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask
    def _sync_transform(self, image, mask):
        """
        Random training augmentation applied jointly to image and mask:
        mirror, scale, pad, crop, and (image-only) gaussian blur.
        """
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask
    @staticmethod
    def _img_transform(image):
        # PIL image -> numpy array (dtype inferred from the image).
        return np.array(image)
    @staticmethod
    def _mask_transform(mask):
        # PIL mask -> int32 label map.
        return np.array(mask).astype(np.int32)
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
class SegDirectoryIterator(DirectoryIterator):
    """
    Keras DirectoryIterator that yields (image, mask) pairs from a wrapped
    segmentation dataset instead of scanning a directory for class subfolders.
    NOTE: `DirectoryIterator.__init__` is deliberately not called (it would
    scan `directory`); only `set_processing_attrs` is used and the iterator
    state is initialized manually.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        super(SegDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # Number of samples comes from the wrapped dataset.
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns:
            A batch of transformed samples (images, int32 masks).
        """
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                # Allocated lazily: sample shapes are only known after the
                # first dataset access.
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.int32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y
class SegImageDataGenerator(ImageDataGenerator):
    """
    Keras image data generator whose `flow_from_directory` yields samples from
    a wrapped segmentation dataset instead of scanning class subdirectories.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        """
        Build a SegDirectoryIterator over `dataset`; `directory` and the
        class-related arguments exist only for interface compatibility.
        """
        iterator_kwargs = {
            "target_size": target_size,
            "color_mode": color_mode,
            "classes": classes,
            "class_mode": class_mode,
            "data_format": self.data_format,
            "batch_size": batch_size,
            "shuffle": shuffle,
            "seed": seed,
            "save_to_dir": save_to_dir,
            "save_prefix": save_prefix,
            "save_format": save_format,
            "follow_links": follow_links,
            "subset": subset,
            "interpolation": interpolation,
            "dataset": dataset,
        }
        return SegDirectoryIterator(directory, self, **iterator_kwargs)
| 7,631 | 33.378378 | 89 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/coco_hpe2_dataset.py | """
COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose).
"""
import os
import json
import math
import threading
import cv2
from operator import itemgetter
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe2Dataset(object):
    """
    COCO keypoint detection (2D multiple human pose estimation) dataset.
    Parameters:
    ----------
    root : string
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode="train",
                 transform=None):
        super(CocoHpe2Dataset, self).__init__()
        self._root = os.path.expanduser(root)
        self.mode = mode
        self.transform = transform
        # 'val', 'test' and 'demo' all read the val2017 split.
        mode_name = "train" if mode == "train" else "val"
        annotations_dir_path = os.path.join(root, "annotations")
        annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
        with open(annotations_file_path, "r") as f:
            self.file_names = json.load(f)["images"]
        self.image_dir_path = os.path.join(root, mode_name + "2017")
        self.annotations_file_path = annotations_file_path
    def __str__(self):
        return self.__class__.__name__ + "(" + self._root + ")"
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        """
        Load one image preprocessed for Lightweight OpenPose: BGR, mean/scale
        normalized, resized so height == 368 and padded to a multiple of the
        network stride (8). Returns (image, label) where label packs
        (image_id, 1.0, pad[4], orig_height, orig_width) for post-processing.
        """
        file_name = self.file_names[idx]["file_name"]
        image_file_path = os.path.join(self.image_dir_path, file_name)
        image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
        # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
        img_mean = (128, 128, 128)
        img_scale = 1.0 / 256
        base_height = 368
        stride = 8
        pad_value = (0, 0, 0)
        height, width, _ = image.shape
        image = self.normalize(image, img_mean, img_scale)
        # Scale so the image height matches the network input height.
        ratio = base_height / float(image.shape[0])
        image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
        min_dims = [base_height, max(image.shape[1], base_height)]
        image, pad = self.pad_width(
            image,
            stride,
            pad_value,
            min_dims)
        image = image.astype(np.float32)
        # image = image.transpose((2, 0, 1))
        # image = torch.from_numpy(image)
        # if self.transform is not None:
        #     image = self.transform(image)
        # Image id is encoded in the COCO file name (zero-padded integer).
        image_id = int(os.path.splitext(os.path.basename(file_name))[0])
        label = np.array([image_id, 1.0] + pad + [height, width], np.float32)
        # label = torch.from_numpy(label)
        return image, label
    @staticmethod
    def normalize(img,
                  img_mean,
                  img_scale):
        """Subtract per-channel mean and scale pixel values."""
        img = np.array(img, dtype=np.float32)
        img = (img - img_mean) * img_scale
        return img
    @staticmethod
    def pad_width(img,
                  stride,
                  pad_value,
                  min_dims):
        """
        Symmetrically pad `img` up to `min_dims` rounded to a multiple of
        `stride`. Returns (padded_img, [top, left, bottom, right]).
        """
        h, w, _ = img.shape
        h = min(min_dims[0], h)
        min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
        min_dims[1] = max(min_dims[1], w)
        min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
        top = int(math.floor((min_dims[0] - h) / 2.0))
        left = int(math.floor((min_dims[1] - w) / 2.0))
        bottom = int(min_dims[0] - h - top)
        right = int(min_dims[1] - w - left)
        pad = [top, left, bottom, right]
        padded_img = cv2.copyMakeBorder(
            src=img,
            top=top,
            bottom=bottom,
            left=left,
            right=right,
            borderType=cv2.BORDER_CONSTANT,
            value=pad_value)
        return padded_img, pad
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
    """
    Identity validation transform for the Lightweight-OpenPose COCO pipeline:
    all preprocessing already happens inside the dataset's __getitem__.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo (stored for interface compatibility; unused).
    """
    def __init__(self, ds_metainfo):
        # Kept only so this matches the common transform interface.
        self.ds_metainfo = ds_metainfo
    def __call__(self, src, label):
        # Pass both inputs through unchanged.
        return (src, label)
def extract_keypoints(heatmap,
                      all_keypoints,
                      total_keypoint_num):
    """
    Find local maxima (candidate keypoints) on a single heatmap channel.

    Mutates `heatmap` (low responses are zeroed) and appends the per-channel
    list of (x, y, score, global_id) tuples to `all_keypoints`.

    Parameters:
    ----------
    heatmap : np.array
        2D confidence map for one keypoint type.
    all_keypoints : list
        Accumulator; receives one list of keypoint tuples per call.
    total_keypoint_num : int
        Number of keypoints found so far (used as the global id offset).

    Returns:
    -------
    int
        Number of keypoints found on this channel.
    """
    # Zero out low-confidence responses (also mutates the caller's heatmap).
    heatmap[heatmap < 0.1] = 0
    padded = np.pad(heatmap, [(2, 2), (2, 2)], mode="constant")
    rows, cols = padded.shape
    center = padded[1:rows - 1, 1:cols - 1]
    right_shift = padded[1:rows - 1, 2:cols]
    left_shift = padded[1:rows - 1, 0:cols - 2]
    down_shift = padded[2:rows, 1:cols - 1]
    up_shift = padded[0:rows - 2, 1:cols - 1]
    # A peak is strictly greater than its 4-connected neighbors.
    is_peak = ((center > right_shift) & (center > left_shift) &
               (center > down_shift) & (center > up_shift))
    is_peak = is_peak[1:center.shape[0] - 1, 1:center.shape[1] - 1]
    peak_rows, peak_cols = np.nonzero(is_peak)
    # Candidates as (x, y), sorted by x coordinate.
    candidates = sorted(zip(peak_cols, peak_rows), key=itemgetter(0))
    suppressed = np.zeros(len(candidates), np.uint8)
    found = []
    for i, (x, y) in enumerate(candidates):
        if suppressed[i]:
            continue
        # Non-maximum suppression: drop later candidates closer than 6 px.
        for j in range(i + 1, len(candidates)):
            dx = x - candidates[j][0]
            dy = y - candidates[j][1]
            if math.sqrt(dx * dx + dy * dy) < 6:
                suppressed[j] = 1
        found.append((x, y, heatmap[y, x], total_keypoint_num + len(found)))
    all_keypoints.append(found)
    return len(found)
def group_keypoints(all_keypoints_by_type,
                    pafs,
                    pose_entry_size=20,
                    min_paf_score=0.05):
    """
    Group detected keypoints into person instances using Part Affinity Fields
    (OpenPose-style greedy bipartite matching).

    Parameters:
    ----------
    all_keypoints_by_type : list of list
        Per-keypoint-type lists of (x, y, score, global_id) tuples.
    pafs : np.array
        PAF tensor of shape (H, W, num_paf_channels), channels last.
    pose_entry_size : int, default 20
        Entry layout: 18 keypoint ids, then pose score ([-2]) and keypoint
        count ([-1]).
    min_paf_score : float, default 0.05
        Minimal per-sample PAF alignment score counted as a match.

    Returns:
    -------
    tuple of (np.array, np.array)
        (pose entries, flat array of all keypoints).
    """
    def linspace2d(start, stop, n=10):
        # n evenly spaced 2D points from start to stop (inclusive).
        points = 1 / (n - 1) * (stop - start)
        return points[:, None] * np.arange(n) + start[:, None]
    # Limb definitions: pairs of keypoint ids and the PAF channel pair
    # (x-component, y-component) encoding each limb.
    BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
                          [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
    BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
                          [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19],
                          [26, 27])
    pose_entries = []
    # Flatten per-type keypoint lists; rows indexed by global keypoint id.
    all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
    for part_id in range(len(BODY_PARTS_PAF_IDS)):
        part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
        kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
        kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
        num_kpts_a = len(kpts_a)
        num_kpts_b = len(kpts_b)
        kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
        kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
        if num_kpts_a == 0 and num_kpts_b == 0:  # no keypoints for such body part
            continue
        elif num_kpts_a == 0:  # body part has just 'b' keypoints
            for i in range(num_kpts_b):
                num = 0
                for j in range(len(pose_entries)):  # check if already in some pose, was added by another body part
                    if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
                        num += 1
                        continue
                if num == 0:
                    # Start a new single-keypoint pose entry.
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_b_id] = kpts_b[i][3]  # keypoint idx
                    pose_entry[-1] = 1  # num keypoints in pose
                    pose_entry[-2] = kpts_b[i][2]  # pose score
                    pose_entries.append(pose_entry)
            continue
        elif num_kpts_b == 0:  # body part has just 'a' keypoints
            for i in range(num_kpts_a):
                num = 0
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
                        num += 1
                        continue
                if num == 0:
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_a_id] = kpts_a[i][3]
                    pose_entry[-1] = 1
                    pose_entry[-2] = kpts_a[i][2]
                    pose_entries.append(pose_entry)
            continue
        # Score every candidate (a, b) connection by PAF alignment along the
        # limb, then greedily keep the best non-conflicting ones.
        connections = []
        for i in range(num_kpts_a):
            kpt_a = np.array(kpts_a[i][0:2])
            for j in range(num_kpts_b):
                kpt_b = np.array(kpts_b[j][0:2])
                mid_point = [(), ()]
                mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
                                int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
                mid_point[1] = mid_point[0]
                vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
                vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
                if vec_norm == 0:
                    continue
                vec[0] /= vec_norm
                vec[1] /= vec_norm
                # Dot product of the unit limb vector with the PAF at the
                # limb midpoint (coarse pre-check).
                cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
                                   vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
                height_n = pafs.shape[0] // 2
                success_ratio = 0
                point_num = 10  # number of points to integration over paf
                if cur_point_score > -100:
                    passed_point_score = 0
                    passed_point_num = 0
                    x, y = linspace2d(kpt_a, kpt_b)
                    for point_idx in range(point_num):
                        px = int(round(x[point_idx]))
                        py = int(round(y[point_idx]))
                        paf = part_pafs[py, px, 0:2]
                        cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
                        if cur_point_score > min_paf_score:
                            passed_point_score += cur_point_score
                            passed_point_num += 1
                    success_ratio = passed_point_num / point_num
                    ratio = 0
                    if passed_point_num > 0:
                        ratio = passed_point_score / passed_point_num
                    # Penalize limbs longer than half the PAF height.
                    ratio += min(height_n / vec_norm - 1, 0)
                if ratio > 0 and success_ratio > 0.8:
                    score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
                    connections.append([i, j, ratio, score_all])
        if len(connections) > 0:
            connections = sorted(connections, key=itemgetter(2), reverse=True)
        num_connections = min(num_kpts_a, num_kpts_b)
        has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
        has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
        filtered_connections = []
        # Greedy assignment: each keypoint participates in at most one limb.
        for row in range(len(connections)):
            if len(filtered_connections) == num_connections:
                break
            i, j, cur_point_score = connections[row][0:3]
            if not has_kpt_a[i] and not has_kpt_b[j]:
                filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
                has_kpt_a[i] = 1
                has_kpt_b[j] = 1
        connections = filtered_connections
        if len(connections) == 0:
            continue
        if part_id == 0:
            # First limb: seed one pose entry per connection.
            pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
            for i in range(len(connections)):
                pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
                pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
                pose_entries[i][-1] = 2
                pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
        elif part_id == 17 or part_id == 18:
            # Ear-shoulder links only complete existing poses; they never
            # create new ones and do not change the score.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                    elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
                        pose_entries[j][kpt_a_id] = connections[i][0]
            continue
        else:
            # Other limbs: extend the pose that already owns keypoint 'a',
            # or start a new entry when none does.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                num = 0
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0]:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                        num += 1
                        pose_entries[j][-1] += 1
                        pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
                if num == 0:
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_a_id] = connections[i][0]
                    pose_entry[kpt_b_id] = connections[i][1]
                    pose_entry[-1] = 2
                    pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
                    pose_entries.append(pose_entry)
    # Drop poses with fewer than 3 keypoints or a low average score.
    filtered_entries = []
    for i in range(len(pose_entries)):
        if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
            continue
        filtered_entries.append(pose_entries[i])
    pose_entries = np.asarray(filtered_entries)
    return pose_entries, all_keypoints
def convert_to_coco_format(pose_entries, all_keypoints):
    """
    Convert grouped OpenPose pose entries into COCO keypoint format.

    Parameters:
    ----------
    pose_entries : sequence of np.array
        Pose entries; first 18 slots are global keypoint ids (-1 = missing),
        [-2] is the pose score and [-1] the keypoint count.
    all_keypoints : np.array
        Flat keypoint table, rows are (x, y, score, id).

    Returns:
    -------
    tuple of (list, list)
        (per-person flat [x, y, visibility] * 17 lists, per-person scores).
    """
    # OpenPose position -> COCO keypoint index (-1: 'neck' has no COCO slot).
    to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
    coco_keypoints = []
    scores = []
    for entry in pose_entries:
        if len(entry) == 0:
            continue
        flat = [0] * (17 * 3)
        for position_id, keypoint_id in enumerate(entry[:-2]):
            if position_id == 1:  # no 'neck' in COCO
                continue
            if keypoint_id != -1:
                cx, cy, score = all_keypoints[int(keypoint_id), 0:3]
                # +0.5 moves from pixel index to pixel center.
                cx, cy, visibility = cx + 0.5, cy + 0.5, 1
            else:
                cx, cy, score, visibility = 0, 0, 0, 0  # keypoint not found
            base = to_coco_map[position_id] * 3
            flat[base + 0] = cx
            flat[base + 1] = cy
            flat[base + 2] = visibility
        coco_keypoints.append(flat)
        # -1 because the synthetic 'neck' keypoint does not count.
        scores.append(entry[-2] * max(0, (entry[-1] - 1)))
    return coco_keypoints, scores
def recalc_pose(pred,
                label):
    """
    Post-process Lightweight-OpenPose network output into COCO keypoints.

    Parameters:
    ----------
    pred : np.array
        Network output, channels last: 19 heatmap channels followed by
        38 PAF channels, shape (batch, H, W, 57).
    label : np.array
        Per-image meta: (image_id, score, pad[4], height, width).

    Returns:
    -------
    tuple
        (keypoints of shape (-1, 17, 3), person scores, image ids).
        NOTE(review): only index [0] of the per-image lists is returned,
        which effectively assumes batch size 1 — confirm against caller.
    """
    label_img_id = label[:, 0].astype(np.int32)
    # label_score = label[:, 1]
    # To channels-first for per-channel slicing below.
    pred = pred.transpose((0, 3, 1, 2))
    pads = label[:, 2:6].astype(np.int32)
    heights = label[:, 6].astype(np.int32)
    widths = label[:, 7].astype(np.int32)
    keypoints = 19
    stride = 8
    heatmap2ds = pred[:, :keypoints]
    # PAFs occupy channels [19, 57): two channels (x, y) per limb.
    paf2ds = pred[:, keypoints:(3 * keypoints)]
    pred_pts_score = []
    pred_person_score = []
    label_img_id_ = []
    batch = pred.shape[0]
    for batch_i in range(batch):
        label_img_id_i = label_img_id[batch_i]
        pad = list(pads[batch_i])
        height = int(heights[batch_i])
        width = int(widths[batch_i])
        heatmap2d = heatmap2ds[batch_i]
        paf2d = paf2ds[batch_i]
        # Upsample by the network stride, strip the letterbox padding, then
        # resize back to the original image resolution.
        heatmaps = np.transpose(heatmap2d, (1, 2, 0))
        heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :]
        heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
        pafs = np.transpose(paf2d, (1, 2, 0))
        pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
        pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(18):  # 19th for bg
            total_keypoints_num += extract_keypoints(
                heatmaps[:, :, kpt_idx],
                all_keypoints_by_type,
                total_keypoints_num)
        pose_entries, all_keypoints = group_keypoints(
            all_keypoints_by_type,
            pafs)
        coco_keypoints, scores = convert_to_coco_format(
            pose_entries,
            all_keypoints)
        pred_pts_score.append(coco_keypoints)
        pred_person_score.append(scores)
        label_img_id_.append([label_img_id_i] * len(scores))
    return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0])
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2MetaInfo(DatasetMetaInfo):
    """
    Metainfo for the COCO keypoint dataset pipeline used with
    Lightweight OpenPose (multi-person 2D HPE).
    """
    def __init__(self):
        super(CocoHpe2MetaInfo, self).__init__()
        self.label = "COCO"
        self.short_label = "coco"
        self.root_dir_name = "coco"
        self.dataset_class = CocoHpe2Dataset
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = 17
        self.input_image_size = (368, 368)
        self.train_metric_capts = None
        self.train_metric_names = None
        self.train_metric_extra_kwargs = None
        self.val_metric_capts = None
        self.val_metric_names = None
        self.test_metric_capts = ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        # annotations path is filled in later by update_from_dataset().
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.test_transform = cocohpe_val_transform
        self.test_transform2 = CocoHpe2ValTransform
        self.test_generator = cocohpe_test_generator
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        self.load_ignore_extra = False
    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for COCO keypoint dataset metainfo).
        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--load-ignore-extra",
            action="store_true",
            help="ignore extra layers in the source PyTorch model")
    def update(self,
               args):
        """
        Update COCO keypoint dataset metainfo after user customizing.
        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe2MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.load_ignore_extra = args.load_ignore_extra
    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.
        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeDirectoryIterator(DirectoryIterator):
    """
    Keras DirectoryIterator that yields (image, label) pairs from a wrapped
    COCO HPE dataset instead of scanning a directory for class subfolders.
    NOTE: `DirectoryIterator.__init__` is deliberately not called (it would
    scan `directory`); only `set_processing_attrs` is used and the iterator
    state is initialized manually.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(368, 368),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        super(CocoHpeDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # Number of samples comes from the wrapped dataset.
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns:
            A batch of transformed samples.
        """
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                # Allocated lazily: sample shapes are only known after the
                # first dataset access.
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y
class CocoHpeImageDataGenerator(ImageDataGenerator):
    """
    Image data generator that produces `CocoHpeDirectoryIterator` instances,
    threading the extra `dataset` argument through `flow_from_directory`.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(368, 368),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        """
        Create a `CocoHpeDirectoryIterator` over the given `dataset`.
        `directory` and most other parameters are kept for interface
        compatibility with keras' `ImageDataGenerator`.
        """
        return CocoHpeDirectoryIterator(
            directory,
            self,
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            dataset=dataset)
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    # NOTE(review): `val_transform2` builds a callable taking (src, label),
    # but it is invoked here with a single image argument; the iterator above
    # reads samples from `dataset` directly, so this preprocessing function
    # appears unused - confirm before relying on it.
    data_generator = CocoHpeImageDataGenerator(
        preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)),
        data_format=data_format)
    return data_generator
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    CocoHpeDirectoryIterator
        Data iterator over the validation split.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=ds_metainfo.val_transform2(
                ds_metainfo=ds_metainfo)))
    return generator
def cocohpe_test_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for testing subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    CocoHpeDirectoryIterator
        Data iterator over the test split (images come from the `val` folder).
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="test",
            transform=ds_metainfo.test_transform2(
                ds_metainfo=ds_metainfo)))
    return generator
| 27,367 | 37.011111 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/coco_hpe3_dataset.py | """
COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose).
"""
import os
import threading
import math
import cv2
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe3Dataset(object):
    """
    COCO keypoint detection (2D multiple human pose estimation) dataset.
    Parameters:
    ----------
    root : string
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode="train",
                 transform=None):
        super(CocoHpe3Dataset, self).__init__()
        self._root = os.path.expanduser(root)
        self.mode = mode
        self.transform = transform
        # Any non-'train' mode evaluates against the validation annotations.
        mode_name = "train" if mode == "train" else "val"
        annotations_dir_path = os.path.join(root, "annotations")
        annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
        # with open(annotations_file_path, "r") as f:
        #     self.file_names = json.load(f)["images"]
        self.image_dir_path = os.path.join(root, mode_name + "2017")
        self.annotations_file_path = annotations_file_path
        # Deferred import: pycocotools is only required when the dataset is
        # actually instantiated.
        from pycocotools.coco import COCO
        self.coco_gt = COCO(self.annotations_file_path)
        self.validation_ids = self.coco_gt.getImgIds()[:]
    def __str__(self):
        return self.__class__.__name__ + "(" + self._root + ")"
    def __len__(self):
        return len(self.validation_ids)
    def __getitem__(self, idx):
        # file_name = self.file_names[idx]["file_name"]
        image_id = self.validation_ids[idx]
        file_name = self.coco_gt.imgs[image_id]["file_name"]
        image_file_path = os.path.join(self.image_dir_path, file_name)
        image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
        # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
        image_src_shape = image.shape[:2]
        # IBPPose preprocessing constants: network box size, maximal total
        # downsampling factor, and the grey padding value.
        boxsize = 512
        max_downsample = 64
        pad_value = 128
        # Scale so the image height equals `boxsize`, capping the resized
        # dimensions at 2600 x 3800.
        scale = boxsize / image.shape[0]
        if scale * image.shape[0] > 2600 or scale * image.shape[1] > 3800:
            scale = min(2600 / image.shape[0], 3800 / image.shape[1])
        image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        image, pad = self.pad_right_down_corner(image, max_downsample, pad_value)
        image = np.float32(image / 255)
        # image = image.transpose((2, 0, 1))
        # image_id = int(os.path.splitext(os.path.basename(file_name))[0])
        # Label layout: [image_id, score, pad_up, pad_left, pad_down,
        # pad_right, src_height, src_width] (decoded by `recalc_pose`).
        label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32)
        return image, label
    @staticmethod
    def pad_right_down_corner(img,
                              stride,
                              pad_value):
        """
        Pad the image on the bottom/right with `pad_value` so both spatial
        dimensions become multiples of `stride`. Returns the padded image and
        the [up, left, down, right] padding amounts.
        """
        h = img.shape[0]
        w = img.shape[1]
        pad = 4 * [None]
        pad[0] = 0  # up
        pad[1] = 0  # left
        pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
        pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right
        img_padded = img
        # Concatenate constant-valued strips on each side; the up/left pads
        # are always zero, so those two concatenations are no-ops.
        pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))
        img_padded = np.concatenate((pad_up, img_padded), axis=0)
        pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))
        img_padded = np.concatenate((pad_left, img_padded), axis=1)
        pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))
        img_padded = np.concatenate((img_padded, pad_down), axis=0)
        pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))
        img_padded = np.concatenate((img_padded, pad_right), axis=1)
        return img_padded, pad
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
    """
    Identity validation transform for the COCO keypoint dataset.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo (kept for interface compatibility; unused here).
    """
    def __init__(self, ds_metainfo):
        self.ds_metainfo = ds_metainfo

    def __call__(self, src, label):
        # Pass-through: image and label are returned unchanged.
        return src, label
def recalc_pose(pred,
                label):
    """
    Post-process network output into COCO-format keypoint predictions.
    Decodes PAF + heatmap tensors (per batch item), assembles people via
    greedy limb matching, and remaps the 18-part layout to the 17-keypoint
    COCO ordering.
    Parameters:
    ----------
    pred : np.array
        Network output, channels-last; first 30 channels are PAFs, the next
        ones heatmaps (see `paf_layers`/`num_layers` below).
    label : np.array
        Per-sample metadata packed by the dataset:
        [image_id, score, pad(4), src_height, src_width].
    Returns:
    -------
    tuple of 3 np.array
        Keypoint coordinates/scores of shape (n, 17, 3), per-person scores,
        and the corresponding image ids.
    """
    # Mapping from the internal 18-part ordering to COCO's 17 keypoints;
    # part 1 ('neck') has no COCO counterpart.
    dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15,
                     14: 2, 15: 1, 16: 4, 17: 3}
    parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne",
             "Lank", "Reye", "Leye", "Rear", "Lear"]
    num_parts = len(parts)
    parts_dict = dict(zip(parts, range(num_parts)))
    limb_from = ['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck',
                 'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho',
                 'Lhip', 'Rear', 'Lear', 'Rhip']
    limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho',
               'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip',
               'Rkne', 'Rsho', 'Lsho', 'Lhip']
    limb_from = [parts_dict[n] for n in limb_from]
    limb_to = [parts_dict[n] for n in limb_to]
    # Sanity check the name-to-index conversion against the fixed layout.
    assert limb_from == [x for x in [
        1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]]
    assert limb_to == [x for x in [
        0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]]
    limbs_conn = list(zip(limb_from, limb_to))
    limb_seq = limbs_conn
    paf_layers = 30
    num_layers = 50
    stride = 4
    label_img_id = label[:, 0].astype(np.int32)
    # label_score = label[:, 1]
    pads = label[:, 2:6].astype(np.int32)
    image_src_shapes = label[:, 6:8].astype(np.int32)
    pred_pts_score = []
    pred_person_score = []
    label_img_id_ = []
    batch = pred.shape[0]
    for batch_i in range(batch):
        label_img_id_i = label_img_id[batch_i]
        pad = list(pads[batch_i])
        image_src_shape = list(image_src_shapes[batch_i])
        # output_blob = pred[batch_i].transpose((1, 2, 0))
        output_blob = pred[batch_i]
        output_paf = output_blob[:, :, :paf_layers]
        output_heatmap = output_blob[:, :, paf_layers:num_layers]
        # Upsample to input resolution, crop the padding, then resize back
        # to the original (pre-scaling) image size.
        heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[
                  pad[0]:(output_blob.shape[0] * stride - pad[2]),
                  pad[1]:(output_blob.shape[1] * stride - pad[3]),
                  :]
        heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
        paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        paf = paf[
              pad[0]:(output_blob.shape[0] * stride - pad[2]),
              pad[1]:(output_blob.shape[1] * stride - pad[3]),
              :]
        paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
        all_peaks = find_peaks(heatmap)
        connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq)
        subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq)
        for s in subset[..., 0]:
            keypoint_indexes = s[:18]
            person_keypoint_coordinates = []
            for index in keypoint_indexes:
                if index == -1:
                    # Undetected keypoint: zero coordinates, zero confidence.
                    X, Y, C = 0, 0, 0
                else:
                    X, Y, C = list(candidate[index.astype(int)][:2]) + [1]
                person_keypoint_coordinates.append([X, Y, C])
            person_keypoint_coordinates_coco = [None] * 17
            for dt_index, gt_index in dt_gt_mapping.items():
                if gt_index is None:
                    continue
                person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index]
            pred_pts_score.append(person_keypoint_coordinates_coco)
            # s[18] holds the person's accumulated part count; convert it to
            # a confidence-like score in (0, 1).
            pred_person_score.append(1 - 1.0 / s[18])
            label_img_id_.append(label_img_id_i)
    return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_)
def find_peaks(heatmap_avg):
    """
    Extract per-part keypoint peaks from the averaged heatmap.
    Applies NMS over the first 18 channels, then refines each surviving
    peak's position with `refine_centroid`. Returns a list of 18 lists of
    (x, y, score, peak_id) tuples with globally unique ids.
    """
    import torch
    thre1 = 0.1
    offset_radius = 2
    all_peaks = []
    peak_counter = 0
    heatmap_avg = heatmap_avg.astype(np.float32)
    # HWC -> NCHW for the torch-based NMS.
    filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...]
    # NOTE(review): `.cuda()` hard-requires a GPU; there is no CPU fallback.
    filter_map = torch.from_numpy(filter_map).cuda()
    filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1)
    filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0))
    for part in range(18):
        map_ori = heatmap_avg[:, :, part]
        peaks_binary = filter_map[:, :, part]
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
        refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks]
        id = range(peak_counter, peak_counter + len(refined_peaks_with_score))
        peaks_with_score_and_id = [refined_peaks_with_score[i] + (id[i],) for i in range(len(id))]
        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
    return all_peaks
def keypoint_heatmap_nms(heat, kernel=3, thre=0.1):
    """
    Non-maximum suppression on a keypoint heatmap (score map).
    A location survives only if it equals the maximum of its
    kernel x kernel neighborhood (reflect-padded) and its score
    is at least `thre`; all other locations are zeroed.
    """
    from torch.nn import functional as F
    margin = (kernel - 1) // 2
    padded = F.pad(heat, (margin, margin, margin, margin), mode="reflect")
    local_max = F.max_pool2d(padded, (kernel, kernel), stride=1, padding=0)
    mask = (local_max == heat).float() * (heat >= thre).float()
    return heat * mask
def refine_centroid(scorefmp, anchor, radius):
    """
    Refine the centroid coordinate. It does not affect the results after testing.
    :param scorefmp: 2-D numpy array, original regressed score map
    :param anchor: python tuple, (x,y) coordinates
    :param radius: int, range of considered scores
    :return: refined anchor, refined score
    """
    x_c, y_c = anchor
    x_min = x_c - radius
    x_max = x_c + radius + 1
    y_min = y_c - radius
    y_max = y_c + radius + 1
    # If the window sticks out of the map, fall back to the unrefined anchor
    # with its raw score.
    if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0:
        return anchor + (scorefmp[y_c, x_c], )
    score_box = scorefmp[y_min:y_max, x_min:x_max]
    # NOTE(review): np.mgrid's first output varies along axis 0 (rows = y),
    # so `x_grid`/`y_grid` appear swapped relative to the (x, y) anchor
    # order; for this symmetric window that would swap the two offsets.
    # Matches the upstream IBPPose code - confirm there before changing.
    x_grid, y_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1]
    offset_x = (score_box * x_grid).sum() / score_box.sum()
    offset_y = (score_box * y_grid).sum() / score_box.sum()
    x_refine = x_c + offset_x
    y_refine = y_c + offset_y
    refined_anchor = (x_refine, y_refine)
    return refined_anchor + (score_box.mean(),)
def find_connections(all_peaks, paf_avg, image_width, limb_seq):
    """
    Score candidate limbs between detected peaks using the PAF channels.
    For each limb type, every (A, B) peak pair is scored by sampling the
    corresponding PAF channel along the segment A-B; pairs passing both the
    coverage and score criteria are greedily matched one-to-one.
    Returns the per-limb connection arrays (columns: peak_id_A, peak_id_B,
    score, index_A, index_B, limb_length) and the list of limb indices with
    no candidates on either end.
    """
    mid_num_ = 20
    thre2 = 0.1
    connect_ration = 0.8
    connection_all = []
    special_k = []
    for k in range(len(limb_seq)):
        score_mid = paf_avg[:, :, k]
        candA = all_peaks[limb_seq[k][0]]
        candB = all_peaks[limb_seq[k][1]]
        nA = len(candA)
        nB = len(candB)
        if nA != 0 and nB != 0:
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # Sample at most `mid_num_` points along the segment.
                    mid_num = min(int(round(norm + 1)), mid_num_)
                    if norm == 0:
                        continue
                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                        np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                    limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for
                                              I in range(len(startend))])
                    score_midpts = limb_response
                    # Mean PAF response with a penalty for limbs longer than
                    # half the image width.
                    score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0)
                    criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i,
                            j,
                            score_with_dist_prior,
                            norm,
                            0.5 * score_with_dist_prior + 0.25 * candA[i][2] + 0.25 * candB[j][2]])
            # Greedy one-to-one assignment by combined score.
            connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True)
            connection = np.zeros((0, 6))
            for c in range(len(connection_candidate)):
                i, j, s, limb_len = connection_candidate[c][0:4]
                if i not in connection[:, 3] and j not in connection[:, 4]:
                    connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]])
                    if len(connection) >= min(nA, nB):
                        break
            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])
    return connection_all, special_k
def find_people(connection_all, special_k, all_peaks, limb_seq):
    """
    Group scored limb connections into person instances.
    `subset` has shape (num_people, 20, 2): rows 0-17 store, per body part,
    [candidate peak id, connection score]; row -2 accumulates the total
    score and row -1 the part count (col 0) and the longest limb (col 1).
    People with fewer than 2 parts or a low mean score are dropped.
    Returns the final `subset` and the flat `candidate` peak array.
    """
    len_rate = 16.0
    connection_tole = 0.7
    remove_recon = 0
    subset = -1 * np.ones((0, 20, 2))
    candidate = np.array([item for sublist in all_peaks for item in sublist])
    for k in range(len(limb_seq)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limb_seq[k])
            for i in range(len(connection_all[k])):
                # Count how many existing people already contain either
                # endpoint of this connection.
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):
                    if subset[j][indexA][0].astype(int) == (partAs[i]).astype(int) or subset[j][indexB][0].astype(
                            int) == partBs[i].astype(int):
                        if found >= 2:
                            continue
                        subset_idx[found] = j
                        found += 1
                if found == 1:
                    j = subset_idx[0]
                    if subset[j][indexB][0].astype(int) == -1 and\
                            len_rate * subset[j][-1][1] > connection_all[k][i][-1]:
                        # Extend this person with a new part B.
                        subset[j][indexB][0] = partBs[i]
                        subset[j][indexB][1] = connection_all[k][i][2]
                        subset[j][-1][0] += 1
                        subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                        subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    elif subset[j][indexB][0].astype(int) != partBs[i].astype(int):
                        if subset[j][indexB][1] >= connection_all[k][i][2]:
                            pass
                        else:
                            # Replace the existing part B with a higher-scored one.
                            if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]:
                                continue
                            subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
                            subset[j][indexB][0] = partBs[i]
                            subset[j][indexB][1] = connection_all[k][i][2]
                            subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                            subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\
                            subset[j][indexB][1] <= connection_all[k][i][2]:
                        # Same part B, but a better-scored connection: rescore.
                        subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
                        subset[j][indexB][0] = partBs[i]
                        subset[j][indexB][1] = connection_all[k][i][2]
                        subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                        subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    else:
                        pass
                elif found == 2:
                    j1, j2 = subset_idx
                    membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2]
                    membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2]
                    membership = membership1 + membership2
                    if len(np.nonzero(membership == 2)[0]) == 0:
                        # The two people share no part: merge them.
                        min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1])
                        min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1])
                        min_tolerance = min(min_limb1, min_limb2)
                        if connection_all[k][i][2] < connection_tole * min_tolerance or\
                                len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]:
                            continue
                        subset[j1][:-2][...] += (subset[j2][:-2][...] + 1)
                        subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0]
                        subset[j1][-2][0] += connection_all[k][i][2]
                        subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1])
                        subset = np.delete(subset, j2, 0)
                    else:
                        # Conflict: the same peak is claimed by both people;
                        # optionally remove the lower-scored assignment.
                        if connection_all[k][i][0] in subset[j1, :-2, 0]:
                            c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0])
                            c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1])
                        else:
                            c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1])
                            c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0])
                        c1 = int(c1[0])
                        c2 = int(c2[0])
                        assert c1 != c2, "an candidate keypoint is used twice, shared by two people"
                        if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]:
                            continue
                        small_j = j1
                        remove_c = c1
                        if subset[j1][c1][1] > subset[j2][c2][1]:
                            small_j = j2
                            remove_c = c2
                        # `remove_recon` is 0, so this reconnection-removal
                        # branch is currently disabled.
                        if remove_recon > 0:
                            subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \
                                subset[small_j][remove_c][1]
                            subset[small_j][remove_c][0] = -1
                            subset[small_j][remove_c][1] = -1
                            subset[small_j][-1][0] -= 1
                elif not found and k < len(limb_seq):
                    # Neither endpoint belongs to anyone yet: start a new person.
                    row = -1 * np.ones((20, 2))
                    row[indexA][0] = partAs[i]
                    row[indexA][1] = connection_all[k][i][2]
                    row[indexB][0] = partBs[i]
                    row[indexB][1] = connection_all[k][i][2]
                    row[-1][0] = 2
                    row[-1][1] = connection_all[k][i][-1]
                    row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                    row = row[np.newaxis, :, :]
                    subset = np.concatenate((subset, row), axis=0)
    # Drop people with too few parts or too low a mean score.
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)
    return subset, candidate
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe3MetaInfo(DatasetMetaInfo):
    """
    Metainfo for the COCO keypoint detection dataset (IBPPose evaluation
    pipeline): dataset class, metrics, transforms, and CLI parameters.
    """
    def __init__(self):
        super(CocoHpe3MetaInfo, self).__init__()
        self.label = "COCO"
        self.short_label = "coco"
        self.root_dir_name = "coco"
        self.dataset_class = CocoHpe3Dataset
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = 17
        self.input_image_size = (256, 256)
        # Only test-time evaluation is supported; train/val metrics are unset.
        self.train_metric_capts = None
        self.train_metric_names = None
        self.train_metric_extra_kwargs = None
        self.val_metric_capts = None
        self.val_metric_names = None
        self.test_metric_capts = ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        # `coco_annotations_file_path` is filled in by `update_from_dataset`.
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "validation_ids": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.test_transform = cocohpe_val_transform
        self.test_transform2 = CocoHpe2ValTransform
        self.test_generator = cocohpe_test_generator
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        self.load_ignore_extra = False
    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for COCO keypoint dataset metainfo).
        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--load-ignore-extra",
            action="store_true",
            help="ignore extra layers in the source PyTorch model")
    def update(self,
               args):
        """
        Update COCO keypoint dataset metainfo after user customizing.
        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe3MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.load_ignore_extra = args.load_ignore_extra
    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.
        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        # Point the OKS-AP metric at the ground-truth annotation file of the
        # freshly created dataset instance.
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
        # self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeDirectoryIterator(DirectoryIterator):
    """
    Keras-style iterator for the COCO keypoint dataset. Samples are taken
    from the supplied `dataset` object (indexable, yielding (image, label)
    pairs) rather than from a directory scan.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(368, 368),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        # Set up only the image-processing attributes; the base-class
        # constructor (which would scan `directory` for class subfolders)
        # is intentionally not called.
        super(CocoHpeDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # Iterator state expected by keras' Iterator machinery.
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns:
            A batch of transformed samples.
        """
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            # Allocate batch arrays lazily, once the sample shape is known.
            if batch_x is None:
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.float32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y
class CocoHpeImageDataGenerator(ImageDataGenerator):
    """
    Image data generator that produces `CocoHpeDirectoryIterator` instances,
    threading the extra `dataset` argument through `flow_from_directory`.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(368, 368),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        """
        Create a `CocoHpeDirectoryIterator` over the given `dataset`.
        `directory` and most other parameters are kept for interface
        compatibility with keras' `ImageDataGenerator`.
        """
        return CocoHpeDirectoryIterator(
            directory,
            self,
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            dataset=dataset)
def cocohpe_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    # NOTE(review): `val_transform2` builds a callable taking (src, label),
    # but it is invoked here with a single image argument; the iterator above
    # reads samples from `dataset` directly, so this preprocessing function
    # appears unused - confirm before relying on it.
    data_generator = CocoHpeImageDataGenerator(
        preprocessing_function=(lambda img: ds_metainfo.val_transform2(ds_metainfo=ds_metainfo)(img)),
        data_format=data_format)
    return data_generator
def cocohpe_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    CocoHpeDirectoryIterator
        Data iterator over the validation split.
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=ds_metainfo.val_transform2(
                ds_metainfo=ds_metainfo)))
    return generator
def cocohpe_test_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for testing subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        COCO keypoint dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    CocoHpeDirectoryIterator
        Data iterator over the test split (images come from the `val` folder).
    """
    split = "val"
    root = ds_metainfo.root_dir_path
    root = os.path.join(root, split)
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation="bilinear",
        dataset=ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="test",
            transform=ds_metainfo.test_transform2(
                ds_metainfo=ds_metainfo)))
    return generator
| 29,689 | 37.408797 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/cifar10_cls_dataset.py | """
CIFAR-10 classification dataset.
"""
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from .dataset_metainfo import DatasetMetaInfo
from .cls_dataset import img_normalization
class CIFAR10MetaInfo(DatasetMetaInfo):
    """
    Metainfo for the CIFAR-10 image classification dataset: sizes, metrics,
    transforms, and generators used by the training/evaluation scripts.
    """
    def __init__(self):
        super(CIFAR10MetaInfo, self).__init__()
        self.label = "CIFAR10"
        self.short_label = "cifar"
        self.root_dir_name = "cifar10"
        # Data comes via `keras.datasets.cifar10`, not a dataset class.
        self.dataset_class = None
        self.num_training_samples = 50000
        self.in_channels = 3
        self.num_classes = 10
        self.input_image_size = (32, 32)
        self.train_metric_capts = ["Train.Err"]
        self.train_metric_names = ["Top1Error"]
        self.train_metric_extra_kwargs = [{"name": "err"}]
        self.val_metric_capts = ["Val.Err"]
        self.val_metric_names = ["Top1Error"]
        self.val_metric_extra_kwargs = [{"name": "err"}]
        self.saver_acc_ind = 0
        self.train_transform = cifar10_train_transform
        self.val_transform = cifar10_val_transform
        self.test_transform = cifar10_val_transform
        self.train_generator = cifar10_train_generator
        self.val_generator = cifar10_val_generator
        self.test_generator = cifar10_val_generator
        self.ml_type = "imgcls"
        # Standard CIFAR-10 per-channel normalization statistics.
        self.mean_rgb = (0.4914, 0.4822, 0.4465)
        self.std_rgb = (0.2023, 0.1994, 0.2010)
        # self.interpolation_msg = "nearest"
def cifar10_train_transform(ds_metainfo,
                            data_format="channels_last"):
    """
    Create image transform sequence for training subset.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def _normalize(img):
        # Normalize with the dataset-wide per-channel statistics.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)
    return ImageDataGenerator(
        preprocessing_function=_normalize,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        data_format=data_format)
def cifar10_val_transform(ds_metainfo,
                          data_format="channels_last"):
    """
    Create image transform sequence for validation subset.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def _normalize(img):
        # Normalization only; no augmentation at validation time.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)
    return ImageDataGenerator(
        preprocessing_function=_normalize,
        data_format=data_format)
def cifar10_train_generator(data_generator,
                            ds_metainfo,
                            batch_size):
    """
    Create image generator for training subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    assert(ds_metainfo is not None)
    (x_train, y_train), _ = cifar10.load_data()
    # NOTE(review): shuffle=False on the *training* generator is unusual;
    # confirm whether shuffling happens elsewhere or this is intentional.
    generator = data_generator.flow(
        x=x_train,
        y=y_train,
        batch_size=batch_size,
        shuffle=False)
    return generator
def cifar10_val_generator(data_generator,
                          ds_metainfo,
                          batch_size):
    """
    Create image generator for validation subset.
    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CIFAR-10 dataset metainfo.
    batch_size : int
        Batch size.
    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    assert (ds_metainfo is not None)
    # Only the test split of the keras-bundled CIFAR-10 data is used here.
    _, (x_test, y_test) = cifar10.load_data()
    return data_generator.flow(
        x=x_test,
        y=y_test,
        batch_size=batch_size,
        shuffle=False)
| 4,434 | 27.798701 | 67 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/cub200_2011_cls_dataset.py | """
CUB-200-2011 classification dataset.
"""
import os
import numpy as np
import pandas as pd
import threading
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
from .cls_dataset import img_normalization
from .imagenet1k_cls_dataset import ImageNet1KMetaInfo
class CUBDirectoryIterator(DirectoryIterator):
    """
    Iterator over CUB-200-2011 images, driven by the dataset's own metadata
    files (`images.txt`, `image_class_labels.txt`, `train_test_split.txt`)
    instead of a class-subfolder directory scan. `mode` selects the 'train'
    or 'val' split as encoded by `train_test_split.txt`.
    Fixes: `np.unicode` (deprecated in NumPy 1.20, removed in 2.0) replaced
    by `str`, and regex separators made raw strings to avoid invalid-escape
    warnings.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 mode="val"):
        # Reuse the base-class image-processing setup, but skip its __init__
        # (which would scan the directory for class subfolders).
        super(CUBDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        root_dir_path = os.path.expanduser(directory)
        assert os.path.exists(root_dir_path)
        images_file_name = "images.txt"
        images_file_path = os.path.join(root_dir_path, images_file_name)
        if not os.path.exists(images_file_path):
            raise Exception("Images file doesn't exist: {}".format(images_file_name))
        class_file_name = "image_class_labels.txt"
        class_file_path = os.path.join(root_dir_path, class_file_name)
        if not os.path.exists(class_file_path):
            raise Exception("Image class file doesn't exist: {}".format(class_file_name))
        split_file_name = "train_test_split.txt"
        split_file_path = os.path.join(root_dir_path, split_file_name)
        if not os.path.exists(split_file_path):
            raise Exception("Split file doesn't exist: {}".format(split_file_name))
        # `str` replaces the removed `np.unicode` alias (same semantics).
        images_df = pd.read_csv(
            images_file_path,
            sep=r"\s+",
            header=None,
            index_col=False,
            names=["image_id", "image_path"],
            dtype={"image_id": np.int32, "image_path": str})
        class_df = pd.read_csv(
            class_file_path,
            sep=r"\s+",
            header=None,
            index_col=False,
            names=["image_id", "class_id"],
            dtype={"image_id": np.int32, "class_id": np.uint8})
        split_df = pd.read_csv(
            split_file_path,
            sep=r"\s+",
            header=None,
            index_col=False,
            names=["image_id", "split_flag"],
            dtype={"image_id": np.int32, "split_flag": np.uint8})
        df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df")
        # train_test_split.txt: 1 = training image, 0 = test/validation image.
        split_flag = 1 if mode == "train" else 0
        subset_df = df[df.split_flag == split_flag]
        image_ids = subset_df["image_id"].values.astype(np.int32)
        # Class ids in the metadata are 1-based; convert to 0-based labels.
        class_ids = subset_df["class_id"].values.astype(np.int32) - 1
        image_file_names = subset_df["image_path"].values.astype(str)
        images_dir_name = "images"
        self.images_dir_path = os.path.join(root_dir_path, images_dir_name)
        assert os.path.exists(self.images_dir_path)
        assert (len(image_ids) == len(class_ids))
        self.class_mode = class_mode
        self.dtype = dtype
        self._filepaths = [os.path.join(self.images_dir_path, image_file_name) for image_file_name in image_file_names]
        self.classes = [int(class_id) for class_id in class_ids]
        # Iterator state expected by keras' Iterator machinery.
        self.n = len(class_ids)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
class CubImageDataGenerator(ImageDataGenerator):
    """
    Keras-style image data generator specialized for CUB-200-2011.

    Overrides `flow_from_directory` so iteration is driven by the dataset's own
    split files (via `CUBDirectoryIterator`) instead of the default
    class-per-subdirectory layout. An extra `mode` keyword selects the subset.
    """
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            mode="val"):
        # Forward everything to the CUB-specific iterator; `self` supplies the
        # augmentation pipeline and `self.data_format` the tensor layout.
        iterator_kwargs = {
            "target_size": target_size,
            "color_mode": color_mode,
            "classes": classes,
            "class_mode": class_mode,
            "data_format": self.data_format,
            "batch_size": batch_size,
            "shuffle": shuffle,
            "seed": seed,
            "save_to_dir": save_to_dir,
            "save_prefix": save_prefix,
            "save_format": save_format,
            "follow_links": follow_links,
            "subset": subset,
            "interpolation": interpolation,
            "mode": mode,
        }
        return CUBDirectoryIterator(directory, self, **iterator_kwargs)
class CUB200MetaInfo(ImageNet1KMetaInfo):
    """
    Descriptor of the CUB-200-2011 fine-grained bird classification dataset
    (200 classes), reusing ImageNet-1K meta-info defaults and overriding the
    dataset-specific fields, transforms and generators.
    """
    def __init__(self):
        super(CUB200MetaInfo, self).__init__()
        self.label = "CUB200_2011"
        self.short_label = "cub"
        self.root_dir_name = "CUB_200_2011"
        self.dataset_class = None
        self.num_training_samples = None
        self.num_classes = 200
        # Both train and validation report top-1 error under the name "err".
        self.train_metric_capts = ["Train.Err"]
        self.train_metric_names = ["Top1Error"]
        self.train_metric_extra_kwargs = [{"name": "err"}]
        self.val_metric_capts = ["Val.Err"]
        self.val_metric_names = ["Top1Error"]
        self.val_metric_extra_kwargs = [{"name": "err"}]
        self.saver_acc_ind = 0
        self.train_transform = cub200_train_transform
        self.val_transform = cub200_val_transform
        self.test_transform = cub200_val_transform
        self.train_generator = cub200_train_generator
        self.val_generator = cub200_val_generator
        self.test_generator = cub200_val_generator
        # By default build the network without its auxiliary head and allow
        # loading checkpoints that contain extra (aux) parameters.
        self.net_extra_kwargs = {"aux": False}
        self.load_ignore_extra = True

    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters related to the dataset.

        Parameters:
        ----------
        parser : ArgumentParser
            Parser to extend.
        work_dir_path : str
            Path to working directory.
        """
        super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--no-aux",
            dest="no_aux",
            action="store_true",
            help="no `aux` mode in model")

    def update(self,
               args):
        """
        Update dataset metainfo after user customizing.

        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CUB200MetaInfo, self).update(args)
        # NOTE(review): with --no-aux the extra kwargs are dropped entirely
        # (model defaults apply) rather than set to {"aux": False} -- confirm
        # that the model default is indeed aux-free.
        if args.no_aux:
            self.net_extra_kwargs = None
            self.load_ignore_extra = False
def cub200_train_transform(ds_metainfo,
                           data_format="channels_last"):
    """
    Create image transform sequence for training subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def normalize(img):
        # Per-channel normalization with the dataset's RGB statistics.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)

    return CubImageDataGenerator(
        preprocessing_function=normalize,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        data_format=data_format)
def cub200_val_transform(ds_metainfo,
                         data_format="channels_last"):
    """
    Create image transform sequence for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageDataGenerator
        Image transform sequence.
    """
    def normalize(img):
        # Per-channel normalization only; no augmentation for validation.
        return img_normalization(
            img=img,
            mean_rgb=ds_metainfo.mean_rgb,
            std_rgb=ds_metainfo.std_rgb)

    return CubImageDataGenerator(
        preprocessing_function=normalize,
        data_format=data_format)
def cub200_train_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for training subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    root = ds_metainfo.root_dir_path
    generator = data_generator.flow_from_directory(
        directory=root,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        # NOTE(review): shuffle=False mirrors the validation generator; if this
        # generator is used for actual SGD training, shuffling is expected --
        # confirm against the training script.
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg,
        # Bug fix: was mode="val", which made CUBDirectoryIterator select the
        # test split (split_flag=0) for the *training* generator.
        mode="train")
    return generator
def cub200_val_generator(data_generator,
                         ds_metainfo,
                         batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CUB-200-2011 dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    # Deterministic (unshuffled) iteration over the validation split.
    return data_generator.flow_from_directory(
        directory=ds_metainfo.root_dir_path,
        target_size=ds_metainfo.input_image_size,
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False,
        interpolation=ds_metainfo.interpolation_msg,
        mode="val")
| 10,350 | 32.070288 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/datasets/cifar100_cls_dataset.py | """
CIFAR-100 classification dataset.
"""
from tensorflow.keras.datasets import cifar100
from .cifar10_cls_dataset import CIFAR10MetaInfo
class CIFAR100MetaInfo(CIFAR10MetaInfo):
    """
    Descriptor of the CIFAR-100 classification dataset: reuses the CIFAR-10
    meta-info and overrides only the label, directory, class count and the
    data generators.
    """
    def __init__(self):
        super(CIFAR100MetaInfo, self).__init__()
        self.label = "CIFAR100"
        self.root_dir_name = "cifar100"
        self.num_classes = 100
        # Validation generator doubles as the test generator.
        self.train_generator = cifar100_train_generator
        self.val_generator = cifar100_val_generator
        self.test_generator = cifar100_val_generator
def cifar100_train_generator(data_generator,
                             ds_metainfo,
                             batch_size):
    """
    Create image generator for training subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    assert ds_metainfo is not None
    # Keras downloads/caches CIFAR-100 on first use; take the train split.
    train_split, _ = cifar100.load_data()
    x_train, y_train = train_split
    return data_generator.flow(
        x=x_train,
        y=y_train,
        batch_size=batch_size,
        shuffle=False)
def cifar100_val_generator(data_generator,
                           ds_metainfo,
                           batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    Sequential
        Image transform sequence.
    """
    assert ds_metainfo is not None
    # Keras downloads/caches CIFAR-100 on first use; take the test split.
    _, test_split = cifar100.load_data()
    x_test, y_test = test_split
    return data_generator.flow(
        x=x_test,
        y=y_test,
        batch_size=batch_size,
        shuffle=False)
| 1,963 | 24.179487 | 55 | py |
imgclsmob | imgclsmob-master/examples/convert_tf2_to_tfl.py | """
Script for converting trained model from TensorFlow 2.0 to TensorFlow Lite.
"""
import argparse
import numpy as np
import tensorflow as tf
from tf2cv.model_provider import get_model as tf2cv_get_model
from tensorflow2.utils import prepare_model
def parse_args():
    """
    Create python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Converting a model from TensorFlow 2.0 to TensorFlow Lite",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = parser.add_argument
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--input", type=str,
            help="path to model weights")
    add_arg("--input-shape", type=int, default=(1, 640, 480, 3),
            help="input tensor shape")
    add_arg("--output-dir", type=str,
            help="path to dir for output TFL file")
    return parser.parse_args()
def main():
    """
    Main body of script: builds a TF2 Keras model (optionally from a local
    checkpoint), converts it to TensorFlow Lite, optionally writes the .tflite
    file, and cross-checks TFLite vs. TF outputs on random input.
    """
    # Enable memory growth so TF does not grab all GPU memory upfront.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    args = parse_args()
    if args.input:
        # Local weights: spatial size is taken from --input-shape (H, W).
        net_extra_kwargs = {"in_size": args.input_shape[1:3]}
        model = prepare_model(
            model_name=args.model,
            use_pretrained=False,
            pretrained_model_file_path=args.input,
            net_extra_kwargs=net_extra_kwargs)
    else:
        model = tf2cv_get_model(
            args.model,
            pretrained=True)
    # One forward pass builds the model's graph/shapes before conversion.
    x = tf.zeros(shape=args.input_shape)
    _ = model.predict(x)
    # Convert the model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    # converter.optimizations = [tf.lite.Optimize.DEFAULT]
    # dataset = np.load(args.dataset)
    # def representative_dataset_gen():
    #     for i in range(len(dataset)):
    #         yield [dataset[i:i + 1]]
    # converter.representative_dataset = representative_dataset_gen
    # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    # converter.inference_input_type = tf.int8
    # converter.inference_output_type = tf.int8
    # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    tflite_model = converter.convert()
    if args.output_dir is not None:
        # NOTE(review): file handle is not explicitly closed; consider `with open(...)`.
        open("{}/{}.tflite".format(args.output_dir, args.model), "wb").write(tflite_model)
    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Test the TensorFlow Lite model on random input data.
    input_shape = input_details[0]["shape"]
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]["index"], input_data)
    interpreter.invoke()
    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    tflite_results = interpreter.get_tensor(output_details[0]["index"])
    # Test the TensorFlow model on random input data.
    tf_results = model(tf.constant(input_data))
    # Compare the result.
    for tf_result, tflite_result in zip(tf_results, tflite_results):
        np.testing.assert_almost_equal(tf_result[0], tflite_result, decimal=5)
    print("All OK.")
# Script entry point (skipped on import).
if __name__ == "__main__":
    main()
| 3,835 | 29.204724 | 106 | py |
imgclsmob | imgclsmob-master/examples/demo_pt.py | """
Script for evaluating trained model on PyTorch / ImageNet-1K (demo mode).
"""
import math
import argparse
import numpy as np
import cv2
import torch
from gluoncv.data import ImageNet1kAttr
from pytorchcv.model_provider import get_model as ptcv_get_model
def parse_args():
    """
    Create python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate an ImageNet-1K model on PyTorch (demo mode)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = parser.add_argument
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--image", type=str, required=True,
            help="path to testing image")
    add_arg("--num-gpus", type=int, default=0,
            help="number of gpus to use")
    add_arg("--input-size", type=int, default=224,
            help="size of the input for model")
    add_arg("--resize-inv-factor", type=float, default=0.875,
            help="inverted ratio for input image crop")
    add_arg("--mean-rgb", nargs=3, type=float, default=(0.485, 0.456, 0.406),
            help="Mean of RGB channels in the dataset")
    add_arg("--std-rgb", nargs=3, type=float, default=(0.229, 0.224, 0.225),
            help="STD of RGB channels in the dataset")
    return parser.parse_args()
def main():
    """
    Main body of script: loads an image, preprocesses it exactly like the
    ImageNet-1K validation pipeline (aspect-preserving resize, center crop,
    normalization), runs a pretrained PyTorch model, and prints the top-5
    predicted classes with probabilities.
    """
    args = parse_args()
    # Load a testing image:
    image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
    # cv2.imshow("image", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # OpenCV loads BGR; models expect RGB.
    image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)
    # Resize image with keeping aspect ratio:
    resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
    h, w = image.shape[:2]
    if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
        # Shorter side becomes `resize_value`, other side scales proportionally.
        resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value)
        image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)
    # Center crop of the image:
    h, w = image.shape[:2]
    th, tw = args.input_size, args.input_size
    ih = int(round(0.5 * (h - th)))
    jw = int(round(0.5 * (w - tw)))
    image = image[ih:(ih + th), jw:(jw + tw), :]
    # cv2.imshow("image2", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # Convert image to a float tensor and normalize it:
    x = image.astype(np.float32)
    x = x / 255.0
    x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)
    # Create `use_cuda` flag:
    use_cuda = (args.num_gpus > 0)
    # Convert the tensor to a Pytorch tensor (HWC -> CHW, add batch dim):
    x = x.transpose(2, 0, 1)
    x = np.expand_dims(x, axis=0)
    x = torch.FloatTensor(x)
    if use_cuda:
        x = x.cuda()
    # Create model with loading pretrained weights:
    net = ptcv_get_model(args.model, pretrained=True)
    net.eval()
    if use_cuda:
        net = net.cuda()
    # Evaluate the network:
    y = net(x)
    probs = torch.nn.Softmax(dim=-1)(y)
    # Show results:
    top_k = 5
    probs_np = probs.cpu().detach().numpy().squeeze(axis=0)
    # argsort ascending, reversed -> indices of the top_k largest probabilities.
    top_k_inds = probs_np.argsort()[::-1][:top_k]
    classes = ImageNet1kAttr().classes
    print("The input picture is classified to be:")
    for k in range(top_k):
        print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
            idx=(k + 1),
            class_name=classes[top_k_inds[k]],
            prob=probs_np[top_k_inds[k]]))
# Script entry point (skipped on import).
if __name__ == "__main__":
    main()
| 3,876 | 27.094203 | 119 | py |
imgclsmob | imgclsmob-master/examples/demo_gl.py | """
Script for evaluating trained model on MXNet/Gluon / ImageNet-1K (demo mode).
"""
import math
import argparse
import numpy as np
import cv2
import mxnet as mx
from gluoncv.data import ImageNet1kAttr
from gluoncv2.model_provider import get_model as glcv2_get_model
def parse_args():
    """
    Create python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate an ImageNet-1K model on Gluon (demo mode)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = parser.add_argument
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--image", type=str, required=True,
            help="path to testing image")
    add_arg("--num-gpus", type=int, default=0,
            help="number of gpus to use")
    add_arg("--input-size", type=int, default=224,
            help="size of the input for model")
    add_arg("--resize-inv-factor", type=float, default=0.875,
            help="inverted ratio for input image crop")
    add_arg("--mean-rgb", nargs=3, type=float, default=(0.485, 0.456, 0.406),
            help="Mean of RGB channels in the dataset")
    add_arg("--std-rgb", nargs=3, type=float, default=(0.229, 0.224, 0.225),
            help="STD of RGB channels in the dataset")
    return parser.parse_args()
def main():
    """
    Main body of script: loads an image, preprocesses it exactly like the
    ImageNet-1K validation pipeline (aspect-preserving resize, center crop,
    normalization), runs a pretrained MXNet/Gluon model, and prints the top-5
    predicted classes with probabilities.
    """
    args = parse_args()
    # Load a testing image:
    image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
    # cv2.imshow("image", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # OpenCV loads BGR; models expect RGB.
    image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)
    # Resize image with keeping aspect ratio:
    resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
    h, w = image.shape[:2]
    if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
        # Shorter side becomes `resize_value`, other side scales proportionally.
        resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value)
        image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)
    # Center crop of the image:
    h, w = image.shape[:2]
    th, tw = args.input_size, args.input_size
    ih = int(round(0.5 * (h - th)))
    jw = int(round(0.5 * (w - tw)))
    image = image[ih:(ih + th), jw:(jw + tw), :]
    # cv2.imshow("image2", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # Convert image to a float tensor and normalize it:
    x = image.astype(np.float32)
    x = x / 255.0
    x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)
    # Create MXNet context:
    mx_ctx = [mx.gpu(i) for i in range(args.num_gpus)] if args.num_gpus > 0 else [mx.cpu()]
    # Convert the tensor to a MXNet nd-array (HWC -> CHW, add batch dim):
    x = x.transpose(2, 0, 1)
    x = np.expand_dims(x, axis=0)
    # NOTE(review): input stays on CPU even when the model lives on GPU
    # contexts -- confirm intended.
    x = mx.nd.array(x, ctx=mx.cpu())
    # Create model with loading pretrained weights:
    net = glcv2_get_model(args.model, pretrained=True, ctx=mx_ctx)
    # Evaluate the network:
    y = net(x)
    probs = mx.nd.softmax(y)
    # Show results:
    top_k = 5
    probs_np = probs.asnumpy().squeeze(axis=0)
    # argsort ascending, reversed -> indices of the top_k largest probabilities.
    top_k_inds = probs_np.argsort()[::-1][:top_k]
    classes = ImageNet1kAttr().classes
    print("The input picture is classified to be:")
    for k in range(top_k):
        print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
            idx=(k + 1),
            class_name=classes[top_k_inds[k]],
            prob=probs_np[top_k_inds[k]]))
# Script entry point (skipped on import).
if __name__ == "__main__":
    main()
| 3,841 | 27.887218 | 119 | py |
imgclsmob | imgclsmob-master/other/train_pt_cifar-.py | import argparse
import time
import logging
import os
import warnings
import random
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from pytorch.cifar1 import add_dataset_parser_arguments, get_train_data_loader, get_val_data_loader
from pytorch.utils import prepare_pt_context, prepare_model, validate1, accuracy, AverageMeter
def parse_args():
    """
    Create python script parameters for CIFAR training on PyTorch.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (PyTorch/CIFAR)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The dataset name is parsed first (parse_known_args) so dataset-specific
    # arguments can be registered before the full parse below.
    parser.add_argument(
        '--dataset',
        type=str,
        default="CIFAR10",
        help='dataset name. options are CIFAR10 and CIFAR100')
    args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, args.dataset)
    # Model / checkpoint options:
    parser.add_argument(
        '--model',
        type=str,
        required=True,
        help='type of model to use. see model_provider for options.')
    parser.add_argument(
        '--use-pretrained',
        action='store_true',
        help='enable using pretrained model from gluon.')
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='resume from previously saved parameters if not None')
    parser.add_argument(
        '--resume-state',
        type=str,
        default='',
        help='resume from previously saved optimizer state if not None')
    # Hardware / data loading options:
    parser.add_argument(
        '--num-gpus',
        type=int,
        default=0,
        help='number of gpus to use.')
    parser.add_argument(
        '-j',
        '--num-data-workers',
        dest='num_workers',
        default=4,
        type=int,
        help='number of preprocessing workers')
    # Training schedule options:
    parser.add_argument(
        '--batch-size',
        type=int,
        default=128,
        help='training batch size per device (CPU/GPU).')
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=3,
        help='number of training epochs.')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=1,
        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument(
        '--attempt',
        type=int,
        default=1,
        help='current number of training')
    # Optimizer / learning-rate options:
    parser.add_argument(
        '--optimizer-name',
        type=str,
        default='nag',
        help='optimizer name')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='learning rate. default is 0.1.')
    parser.add_argument(
        '--lr-mode',
        type=str,
        default='step',
        help='learning rate scheduler mode. options are step, poly and cosine.')
    parser.add_argument(
        '--lr-decay',
        type=float,
        default=0.1,
        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument(
        '--lr-decay-period',
        type=int,
        default=0,
        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument(
        '--lr-decay-epoch',
        type=str,
        default='40,60',
        help='epoches at which learning rate decays. default is 40,60.')
    parser.add_argument(
        '--warmup-lr',
        type=float,
        default=0.0,
        help='starting warmup learning rate. default is 0.0.')
    parser.add_argument(
        '--warmup-epochs',
        type=int,
        default=0,
        help='number of warmup epochs.')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='momentum value for optimizer, default is 0.9.')
    parser.add_argument(
        '--wd',
        type=float,
        default=0.0001,
        help='weight decay rate. default is 0.0001.')
    # Logging / checkpointing options:
    parser.add_argument(
        '--log-interval',
        type=int,
        default=200,
        help='number of batches to wait before logging.')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=4,
        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument(
        '--save-dir',
        type=str,
        default='',
        help='directory of saved models and log-files')
    parser.add_argument(
        '--logging-file-name',
        type=str,
        default='train.log',
        help='filename of training log')
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='Random seed to be fixed')
    parser.add_argument(
        '--log-packages',
        type=str,
        default='torch, torchvision',
        help='list of python packages for logging')
    parser.add_argument(
        '--log-pip-packages',
        type=str,
        default='',
        help='list of pip packages for logging')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Seed all random number generators used during training.

    Parameters:
    ----------
    seed : int
        Requested seed; non-positive means "pick one at random".

    Returns:
    -------
    int
        The seed actually applied.
    """
    seed_is_fixed = (seed > 0)
    if not seed_is_fixed:
        # No explicit seed requested: draw one so the run is still reproducible
        # if the value is recorded.
        seed = np.random.randint(10000)
    else:
        # A user-provided seed turns on deterministic CUDNN kernels.
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    num_epochs,
                    state_file_path):
    """
    Create the optimizer, the LR scheduler and resolve the resume epoch.

    Parameters:
    ----------
    net : Module
        Model to optimize.
    optimizer_name : str
        Optimizer name ('sgd' or 'nag', case-insensitive).
    wd : float
        Weight decay rate.
    momentum : float
        SGD momentum.
    lr_mode : str
        LR scheduler mode ('step', 'multistep' or 'cosine').
    lr : float
        Base learning rate.
    lr_decay_period : int
        Period for periodic decays; 0 disables and uses `lr_decay_epoch`.
    lr_decay_epoch : str
        Comma-separated epochs at which the LR decays (used if period is 0).
    lr_decay : float
        Multiplicative decay factor.
    num_epochs : int
        Total number of training epochs.
    state_file_path : str
        Path to a saved optimizer state ('' to start fresh).

    Returns:
    -------
    tuple of (Optimizer, _LRScheduler, int or None)
        Optimizer, LR scheduler, and epoch to resume from (None if fresh).

    Raises:
    -------
    ValueError
        On an unsupported optimizer or scheduler name.
    """
    optimizer_name = optimizer_name.lower()
    if optimizer_name in ('sgd', 'nag'):
        # 'nag' is SGD with Nesterov momentum.
        optimizer = torch.optim.SGD(
            params=net.parameters(),
            lr=lr,
            momentum=momentum,
            weight_decay=wd,
            nesterov=(optimizer_name == 'nag'))
    else:
        # Bug fix: error message previously read "Usupported".
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))

    if state_file_path:
        checkpoint = torch.load(state_file_path)
        if type(checkpoint) == dict:
            optimizer.load_state_dict(checkpoint['optimizer'])
            start_epoch = checkpoint['epoch']
        else:
            start_epoch = None
    else:
        start_epoch = None

    # Let CUDNN auto-tune kernels for the (fixed) input shapes.
    cudnn.benchmark = True

    lr_mode = lr_mode.lower()
    if lr_decay_period > 0:
        # Periodic decays override the explicit epoch list.
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')]
    if (lr_mode == 'step') and (lr_decay_period != 0):
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer=optimizer,
            step_size=lr_decay_period,
            gamma=lr_decay,
            last_epoch=-1)
    elif (lr_mode == 'multistep') or ((lr_mode == 'step') and (lr_decay_period == 0)):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer,
            milestones=lr_decay_epoch,
            gamma=lr_decay,
            last_epoch=-1)
    elif lr_mode == 'cosine':
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
        # NOTE(review): last_epoch=(num_epochs - 1) initializes the cosine
        # schedule at its final point; the usual fresh-start value is -1 --
        # confirm this is intended before changing.
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer,
            T_max=num_epochs,
            last_epoch=(num_epochs - 1))
    else:
        # Bug fix: error message previously read "Usupported".
        raise ValueError("Unsupported lr_scheduler: {}".format(lr_mode))
    return optimizer, lr_scheduler, start_epoch
def save_params(file_stem,
                state):
    """
    Persist model weights and the full training state to disk.

    Parameters:
    ----------
    file_stem : str
        Path prefix for the output files ('.pth' and '.states' are appended).
    state : dict
        Training state; its 'state_dict' entry holds the model weights.
    """
    weights_file = file_stem + '.pth'
    states_file = file_stem + '.states'
    # Weights alone for inference; full state for resuming training.
    torch.save(obj=state['state_dict'], f=weights_file)
    torch.save(obj=state, f=states_file)
def train_epoch(epoch,
                acc_metric_train,
                net,
                train_data,
                use_cuda,
                L,
                optimizer,
                # lr_scheduler,
                batch_size,
                log_interval):
    """
    Train the network for one epoch and return (top-1 error, mean loss).

    Parameters:
    ----------
    epoch : int
        Zero-based epoch index (logged as epoch + 1).
    acc_metric_train : AverageMeter
        Running top-1 accuracy meter; reset at epoch start.
    net : Module
        Model to train.
    train_data : DataLoader
        Training batches of (data, target).
    use_cuda : bool
        Whether to move batches to GPU.
    L : loss Module
        Loss criterion.
    optimizer : Optimizer
        Optimizer stepping after each batch.
    batch_size : int
        Global batch size (for throughput logging only).
    log_interval : int
        Log every `log_interval` batches; 0/None disables intra-epoch logging.
    """
    tic = time.time()
    net.train()
    acc_metric_train.reset()
    train_loss = 0.0
    btic = time.time()
    # NOTE(review): an empty `train_data` would leave `i` undefined for the
    # post-loop statistics below (NameError) -- assumed non-empty.
    for i, (data, target) in enumerate(train_data):
        if use_cuda:
            data = data.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        output = net(data)
        loss = L(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        acc_train_value = accuracy(output, target, topk=(1, ))
        acc_metric_train.update(acc_train_value[0], data.size(0))
        if log_interval and not (i + 1) % log_interval:
            acc_train_value = acc_metric_train.avg.item()
            err_train_value = 1.0 - acc_train_value
            # Throughput over the batches since the last log line.
            speed = batch_size * log_interval / (time.time() - btic)
            logging.info('Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\terr={:.4f}\tlr={:.4f}'.format(
                epoch + 1, i, speed, err_train_value, optimizer.param_groups[0]['lr']))
            btic = time.time()
    acc_train_value = acc_metric_train.avg.item()
    err_train_value = 1.0 - acc_train_value
    # Mean loss over the (i + 1) processed batches.
    train_loss /= (i + 1)
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info('[Epoch {}] training: err={:.4f}\tloss={:.4f}'.format(
        epoch + 1, err_train_value, train_loss))
    logging.info('[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec'.format(
        epoch + 1, throughput, time.time() - tic))
    return err_train_value, train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              net,
              optimizer,
              lr_scheduler,
              lp_saver,
              log_interval,
              use_cuda):
    """
    Run the full training loop: per-epoch training, validation, checkpointing.

    Parameters:
    ----------
    batch_size : int
        Global batch size (for logging/throughput).
    num_epochs : int
        Total number of epochs.
    start_epoch1 : int
        One-based epoch to start from (>= 1; > 1 when resuming).
    train_data / val_data : DataLoader
        Training and validation loaders.
    net : Module
        Model to train.
    optimizer : Optimizer
        Optimizer.
    lr_scheduler : _LRScheduler
        Learning-rate scheduler (stepped once per epoch).
    lp_saver : TrainLogParamSaver or None
        Checkpoint/metric saver; None disables saving.
    log_interval : int
        Intra-epoch logging interval.
    use_cuda : bool
        Whether to move the loss criterion and data to GPU.
    """
    acc_metric_val = AverageMeter()
    acc_metric_train = AverageMeter()
    L = nn.CrossEntropyLoss()
    if use_cuda:
        L = L.cuda()
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # When resuming, log the validation error of the restored weights first.
        logging.info('Start training from [Epoch {}]'.format(start_epoch1))
        err_val = validate1(
            accuracy_metric=acc_metric_val,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        logging.info('[Epoch {}] validation: err={:.4f}'.format(
            start_epoch1 - 1, err_val))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        # NOTE(review): scheduler is stepped *before* the epoch's optimizer
        # steps; modern PyTorch expects step() after the epoch -- confirm
        # intended for the targeted torch version.
        lr_scheduler.step()
        err_train, train_loss = train_epoch(
            epoch,
            acc_metric_train,
            net,
            train_data,
            use_cuda,
            L,
            optimizer,
            # lr_scheduler,
            batch_size,
            log_interval)
        err_val = validate1(
            accuracy_metric=acc_metric_val,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        logging.info('[Epoch {}] validation: err={:.4f}'.format(
            epoch + 1, err_val))
        if lp_saver is not None:
            # Snapshot weights + optimizer state so training can be resumed.
            state = {
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            lp_saver_kwargs = {'state': state}
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=[err_val, err_train, train_loss, optimizer.param_groups[0]['lr']],
                **lp_saver_kwargs)
    logging.info('Total time cost: {:.2f} sec'.format(time.time() - gtic))
    if lp_saver is not None:
        logging.info('Best err: {:.4f} at {} epoch'.format(
            lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script: parses arguments, seeds RNGs, sets up logging, builds
    the model/data loaders/optimizer, and launches the training loop.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    # NOTE(review): log_file_exist is unused below -- presumably informational.
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)
    train_data = get_train_data_loader(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_loader(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)
    # num_training_samples = 1281167
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        # warmup_epochs=args.warmup_epochs,
        # batch_size=batch_size,
        num_epochs=args.num_epochs,
        # num_training_samples=num_training_samples,
        state_file_path=args.resume_state)
    # if start_epoch is not None:
    #     args.start_epoch = start_epoch
    if args.save_dir and args.save_interval:
        # Checkpointing is only enabled with both a save dir and an interval.
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix='{}_{}'.format(args.dataset.lower(), args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=('.pth', '.states'),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=['Val.Err', 'Train.Err', 'Train.Loss', 'LR'],
            acc_ind=0,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, 'score.log'),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    else:
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        use_cuda=use_cuda)
# Script entry point (skipped on import).
if __name__ == '__main__':
    main()
| 15,172 | 30.092213 | 105 | py |
imgclsmob | imgclsmob-master/other/train_gl_seg.py | import os
import shutil
import argparse
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon.data.vision import transforms
import gluoncv
from gluoncv.loss import MixSoftmaxCrossEntropyLoss
from gluoncv.utils import LRScheduler
from gluoncv.model_zoo.segbase import get_segmentation_model, SegEvalModel
from gluoncv.model_zoo import get_model
from gluoncv.utils.parallel import DataParallelModel, DataParallelCriterion
from gluoncv.data import get_segmentation_dataset
def parse_args():
    """Training Options for Segmentation Experiments"""
    parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')
    # Model / dataset options:
    parser.add_argument('--model', type=str, default='fcn', help='model name (default: fcn)')
    parser.add_argument('--backbone', type=str, default='resnet50', help='backbone name (default: resnet50)')
    parser.add_argument('--dataset', type=str, default='pascalaug', help='dataset name (default: pascal)')
    parser.add_argument('--dataset-dir', type=str, default='../imgclsmob_data/voc', help='dataset path')
    parser.add_argument('--workers', type=int, default=16, metavar='N', help='dataloader threads')
    parser.add_argument('--base-size', type=int, default=520, help='base image size')
    parser.add_argument('--crop-size', type=int, default=480, help='crop image size')
    parser.add_argument('--train-split', type=str, default='train', help='dataset train split (default: train)')
    # Auxiliary-loss options:
    parser.add_argument('--aux', action='store_true', default=False, help='Auxiliary loss')
    parser.add_argument('--aux-weight', type=float, default=0.5, help='auxiliary loss weight')
    # Training schedule options:
    parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
    parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size', type=int, default=16, metavar='N',
                        help='input batch size for testing (default: 32)')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', help='learning rate (default: 1e-3)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=1e-4, metavar='M', help='w-decay (default: 1e-4)')
    parser.add_argument('--no-wd', action='store_true',
                        help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')
    # Hardware options (note: --ngpus default is evaluated at import time):
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--ngpus', type=int, default=len(mx.test_utils.list_gpus()), help='number of GPUs (default: 4)')
    parser.add_argument('--kvstore', type=str, default='device', help='kvstore to use for trainer/module.')
    parser.add_argument('--dtype', type=str, default='float32', help='data type for training. default is float32')
    # checking point
    parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
    parser.add_argument('--checkname', type=str, default='default', help='set the checkpoint name')
    parser.add_argument('--model-zoo', type=str, default=None, help='evaluating on model zoo model')
    # evaluation only
    parser.add_argument('--eval', action='store_true', default=False, help='evaluation only')
    parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training')
    # synchronized Batch Normalization
    parser.add_argument('--syncbn', action='store_true', default=False, help='using Synchronized Cross-GPU BatchNorm')
    # the parser
    args = parser.parse_args()
    # handle contexts
    if args.no_cuda:
        print('Using CPU')
        args.kvstore = 'local'
        args.ctx = [mx.cpu(0)]
    else:
        print('Number of GPUs:', args.ngpus)
        args.ctx = [mx.gpu(i) for i in range(args.ngpus)]
    # Synchronized BatchNorm
    args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn else mx.gluon.nn.BatchNorm
    args.norm_kwargs = {'num_devices': args.ngpus} if args.syncbn else {}
    print(args)
    return args
class Trainer(object):
    """
    Training/validation helper for a semantic segmentation model (MXNet/Gluon).

    Builds the datasets, data loaders, network, criterion, optimizer and metric
    from the parsed command-line ``args`` and exposes per-epoch ``training`` and
    ``validation`` methods.
    """

    def __init__(self, args):
        self.args = args
        # image transform: ImageNet mean/std normalization
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
        ])
        # dataset and dataloader
        data_kwargs = {
            'transform': input_transform,
            'base_size': args.base_size,
            'crop_size': args.crop_size,
            'root': args.dataset_dir}
        trainset = get_segmentation_dataset(
            args.dataset,
            split=args.train_split,
            mode='train',
            **data_kwargs)
        valset = get_segmentation_dataset(
            args.dataset,
            split='val',
            mode='val',
            **data_kwargs)
        self.train_data = gluon.data.DataLoader(
            trainset,
            args.batch_size,
            shuffle=True,
            last_batch='rollover',
            num_workers=args.workers)
        self.eval_data = gluon.data.DataLoader(
            valset,
            args.test_batch_size,
            last_batch='rollover',
            num_workers=args.workers)
        # create network: either a pretrained model-zoo model, or one assembled
        # from the (model, backbone, dataset) triple
        if args.model_zoo is not None:
            model = get_model(args.model_zoo, pretrained=True)
        else:
            model = get_segmentation_model(
                model=args.model,
                dataset=args.dataset,
                backbone=args.backbone,
                norm_layer=args.norm_layer,
                norm_kwargs=args.norm_kwargs,
                aux=args.aux,
                crop_size=args.crop_size)
        model.cast(args.dtype)
        print(model)
        # wrap the model for multi-device data-parallel training/evaluation
        self.net = DataParallelModel(model, args.ctx, args.syncbn)
        self.evaluator = DataParallelModel(SegEvalModel(model), args.ctx)
        # resume checkpoint if needed
        if args.resume is not None:
            if os.path.isfile(args.resume):
                model.load_parameters(args.resume, ctx=args.ctx)
            else:
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
        # create criterion (optionally with an auxiliary-head loss term)
        criterion = MixSoftmaxCrossEntropyLoss(args.aux, aux_weight=args.aux_weight)
        self.criterion = DataParallelCriterion(criterion, args.ctx, args.syncbn)
        # optimizer and lr scheduling: polynomial decay over the whole run
        self.lr_scheduler = LRScheduler(
            mode='poly',
            base_lr=args.lr,
            nepochs=args.epochs,
            iters_per_epoch=len(self.train_data),
            power=0.9)
        kv = mx.kv.create(args.kvstore)
        optimizer_params = {
            'lr_scheduler': self.lr_scheduler,
            'wd': args.weight_decay,
            'momentum': args.momentum}
        if args.dtype == 'float16':
            optimizer_params['multi_precision'] = True
        if args.no_wd:
            # remove weight decay from biases and batchnorm beta/gamma
            for k, v in self.net.module.collect_params('.*beta|.*gamma|.*bias').items():
                v.wd_mult = 0.0
        self.optimizer = gluon.Trainer(
            self.net.module.collect_params(),
            'sgd',
            optimizer_params,
            kvstore=kv)
        # evaluation metrics
        self.metric = gluoncv.utils.metrics.SegmentationMetric(trainset.num_class)

    def training(self, epoch):
        """Run one training epoch; shows running loss and saves a checkpoint at the end."""
        tbar = tqdm(self.train_data)
        train_loss = 0.0
        for i, (data, target) in enumerate(tbar):
            with autograd.record(True):
                # NOTE(review): reads the module-level `args` (not self.args) for
                # dtype — works only because `args` is set in __main__; verify.
                outputs = self.net(data.astype(args.dtype, copy=False))
                losses = self.criterion(outputs, target)
                mx.nd.waitall()
            autograd.backward(losses)
            self.optimizer.step(self.args.batch_size)
            # average the per-device losses into the running total
            for loss in losses:
                train_loss += loss.asnumpy()[0] / len(losses)
            tbar.set_description('Epoch {}, training loss {}'.format(epoch, train_loss / (i + 1)))
            mx.nd.waitall()
        # save every epoch
        save_checkpoint(self.net.module, self.args, False)

    def validation(self, epoch):
        """Evaluate on the validation split, reporting pixel accuracy and mIoU."""
        self.metric.reset()
        tbar = tqdm(self.eval_data)
        for i, (data, target) in enumerate(tbar):
            # NOTE(review): uses the module-level `args` for dtype/ctx — verify.
            outputs = self.evaluator(data.astype(args.dtype, copy=False))
            outputs = [x[0] for x in outputs]
            targets = mx.gluon.utils.split_and_load(target, args.ctx, even_split=False)
            self.metric.update(targets, outputs)
            pixAcc, mIoU = self.metric.get()
            tbar.set_description('Epoch {}, validation pixAcc: {}, mIoU: {}'.format(epoch, pixAcc, mIoU))
        mx.nd.waitall()
def save_checkpoint(net, args, is_best=False):
    """
    Persist the network parameters under the run's checkpoint directory,
    optionally also copying them as the best-so-far model.
    """
    ckpt_dir = "../imgclsmob_data/{}/{}/{}/".format(args.dataset, args.model, args.checkname)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    ckpt_path = ckpt_dir + 'checkpoint.params'
    net.save_parameters(ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, ckpt_dir + 'model_best.params')
if __name__ == "__main__":
    # Script entry point: evaluate once, or train (optionally validating each epoch).
    args = parse_args()
    trainer = Trainer(args)
    if args.eval:
        print('Evaluating model: ', args.resume)
        trainer.validation(args.start_epoch)
    else:
        print('Starting Epoch:', args.start_epoch)
        print('Total Epochs:', args.epochs)
        for epoch in range(args.start_epoch, args.epochs):
            trainer.training(epoch)
            if not trainer.args.no_val:
                trainer.validation(epoch)
| 9,856 | 43.201794 | 120 | py |
imgclsmob | imgclsmob-master/other/train_gl_cifar-.py | import argparse
import time
import logging
import os
import numpy as np
import random
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model, validate, report_accuracy, get_composite_metric
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_batch_fn
from gluon.dataset_utils import get_train_data_source
from gluon.dataset_utils import get_val_data_source
def parse_args():
    """
    Parse the command-line parameters for CIFAR classification training.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments, extended with dataset-specific options.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Gluon/CIFAR)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset', type=str, default="CIFAR10", help='dataset name. options are CIFAR10 and CIFAR100')
    parser.add_argument("--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"),
                        help="path to working directory only for dataset root path preset")
    # the dataset choice determines which extra options get registered
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(parser=parser, work_dir_path=args.work_dir)
    # model / checkpointing
    parser.add_argument('--model', type=str, required=True, help='type of model to use. see model_provider for options.')
    parser.add_argument('--use-pretrained', action='store_true', help='enable using pretrained model from gluon.')
    parser.add_argument('--dtype', type=str, default='float32', help='data type for training')
    parser.add_argument('--not-hybridize', action='store_true', help='do not hybridize model')
    parser.add_argument('--resume', type=str, default='', help='resume from previously saved parameters if not None')
    parser.add_argument('--resume-state', type=str, default='', help='resume from previously saved optimizer state if not None')
    # hardware / data pipeline
    parser.add_argument('--num-gpus', type=int, default=0, help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers')
    parser.add_argument('--batch-size', type=int, default=128, help='training batch size per device (CPU/GPU).')
    parser.add_argument('--batch-size-scale', type=int, default=1, help='manual batch-size increasing factor.')
    # schedule
    parser.add_argument('--num-epochs', type=int, default=200, help='number of training epochs.')
    parser.add_argument('--start-epoch', type=int, default=1, help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument('--attempt', type=int, default=1, help='current number of training')
    # optimizer / learning rate
    parser.add_argument('--optimizer-name', type=str, default='nag', help='optimizer name')
    parser.add_argument('--lr', type=float, default=0.1, help='learning rate')
    parser.add_argument('--lr-mode', type=str, default='cosine', help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument('--lr-decay', type=float, default=0.1, help='decay rate of learning rate')
    parser.add_argument('--lr-decay-period', type=int, default=0, help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument('--lr-decay-epoch', type=str, default='40,60', help='epoches at which learning rate decays')
    parser.add_argument('--target-lr', type=float, default=1e-8, help='ending learning rate')
    parser.add_argument('--poly-power', type=float, default=2, help='power value for poly LR scheduler')
    parser.add_argument('--warmup-epochs', type=int, default=0, help='number of warmup epochs.')
    parser.add_argument('--warmup-lr', type=float, default=1e-8, help='starting warmup learning rate')
    parser.add_argument('--warmup-mode', type=str, default='linear', help='learning rate scheduler warmup mode. options are linear, poly and constant')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum value for optimizer')
    # regularization
    parser.add_argument('--wd', type=float, default=0.0001, help='weight decay rate')
    parser.add_argument('--gamma-wd-mult', type=float, default=1.0, help='weight decay multiplier for batchnorm gamma')
    parser.add_argument('--beta-wd-mult', type=float, default=1.0, help='weight decay multiplier for batchnorm beta')
    parser.add_argument('--bias-wd-mult', type=float, default=1.0, help='weight decay multiplier for bias')
    parser.add_argument('--grad-clip', type=float, default=None, help='max_norm for gradient clipping')
    parser.add_argument('--label-smoothing', action='store_true', help='use label smoothing')
    parser.add_argument('--mixup', action='store_true', help='use mixup strategy')
    parser.add_argument('--mixup-epoch-tail', type=int, default=20, help='number of epochs without mixup at the end of training')
    # logging / saving
    parser.add_argument('--log-interval', type=int, default=200, help='number of batches to wait before logging.')
    parser.add_argument('--save-interval', type=int, default=4, help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument('--save-dir', type=str, default='', help='directory of saved models and log-files')
    parser.add_argument('--logging-file-name', type=str, default='train.log', help='filename of training log')
    parser.add_argument('--seed', type=int, default=-1, help='Random seed to be fixed')
    parser.add_argument('--log-packages', type=str, default='mxnet', help='list of python packages for logging')
    parser.add_argument('--log-pip-packages', type=str, default='mxnet-cu100', help='list of pip packages for logging')
    parser.add_argument('--tune-layers', type=str, default='', help='Regexp for selecting layers for fine tuning')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Seed python's random, numpy and mxnet RNGs; draw a fresh seed when the
    given one is non-positive. Returns the seed actually used.
    """
    actual_seed = np.random.randint(10000) if seed <= 0 else seed
    random.seed(actual_seed)
    np.random.seed(actual_seed)
    mx.random.seed(actual_seed)
    return actual_seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Build a gluon.Trainer plus its LR scheduler for the given network.

    Applies per-parameter-group weight-decay multipliers, constructs the LR
    schedule, and optionally restores optimizer state from a saved file.
    Returns the (trainer, lr_scheduler) pair.
    """
    # per-group weight-decay multipliers (only touched when not the default 1.0)
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params(".*gamma").items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params(".*beta").items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params(".*bias").items():
            v.wd_mult = bias_wd_mult
    # a positive decay period generates evenly spaced decay epochs; otherwise
    # the comma-separated epoch list is used
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {"learning_rate": lr,
                        "wd": wd,
                        "momentum": momentum,
                        "lr_scheduler": lr_scheduler}
    if dtype != "float32":
        # keep fp32 master weights when training in reduced precision
        optimizer_params["multi_precision"] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    # optionally restore optimizer state from a previous run; the restored
    # state may carry stale wd/scheduler values, so both are reset afterwards
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info("Loading trainer states: {}".format(state_file_path))
        trainer.load_states(state_file_path)
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info("Reset the weight decay: {}".format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem,
                net,
                trainer):
    """Write network weights to <stem>.params and optimizer state to <stem>.states."""
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                train_metric,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                mixup,
                mixup_epoch_tail,
                label_smoothing,
                num_classes,
                num_epochs,
                grad_clip_value,
                batch_size_scale,
                ):
    """
    Run one training epoch and return the mean training loss.

    Supports mixup / label-smoothing label transforms, gradient clipping, and
    gradient accumulation over ``batch_size_scale`` mini-batches (the caller
    must have set ``grad_req = "add"`` on the parameters in that case).
    """
    labels_list_inds = None
    batch_size_extend_count = 0  # mini-batches accumulated since last optimizer step
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    train_metric.reset()
    train_loss = 0.0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        if mixup:
            # keep the integer labels for metric computation; train on one-hot
            labels_list_inds = labels_list
            labels_list = [Y.one_hot(depth=num_classes) for Y in labels_list]
            # mixup is disabled for the final `mixup_epoch_tail` epochs
            if epoch < num_epochs - mixup_epoch_tail:
                alpha = 1
                lam = np.random.beta(alpha, alpha)
                # mix each sample with the batch in reversed order
                data_list = [lam * X + (1 - lam) * X[::-1] for X in data_list]
                labels_list = [lam * Y + (1 - lam) * Y[::-1] for Y in labels_list]
        elif label_smoothing:
            eta = 0.1
            on_value = 1 - eta + eta / num_classes
            off_value = eta / num_classes
            labels_list_inds = labels_list
            labels_list = [Y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value) for Y in labels_list]
        with ag.record():
            outputs_list = [net(X.astype(dtype, copy=False)) for X in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)]
        for loss in loss_list:
            loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # gradient accumulation: step only every `batch_size_scale` batches,
            # then zero the accumulated gradients manually
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list)
        # metrics are computed against the original (non-mixed, non-smoothed) labels
        train_metric.update(
            src_pts=(labels_list if not (mixup or label_smoothing) else labels_list_inds),
            dst_pts=outputs_list)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
                epoch + 1, i, speed, train_accuracy_msg, trainer.learning_rate))
    # flush any leftover accumulated gradients from an incomplete group
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    train_accuracy_msg = report_accuracy(metric=train_metric)
    logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
        epoch + 1, train_accuracy_msg, train_loss))
    return train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              mixup,
              mixup_epoch_tail,
              label_smoothing,
              num_classes,
              grad_clip_value,
              batch_size_scale,
              val_metric,
              train_metric,
              opt_metric_name,
              ctx):
    """
    Main training loop: runs `train_epoch` + validation for each epoch and
    reports/saves progress through the optional `lp_saver`.

    ``start_epoch1`` is 1-based; mixup and label smoothing are mutually exclusive.
    """
    assert (not (mixup and label_smoothing))
    if batch_size_scale != 1:
        # gradient accumulation requires gradients to be summed across batches
        for p in net.collect_params().values():
            p.grad_req = "add"
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # mixup/label-smoothing produce dense (one-hot) labels, so the loss must
    # not expect sparse labels in that case
    loss_func = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing)))
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # resuming: validate once to report the restored model's quality
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_loss = train_epoch(
            epoch=epoch,
            net=net,
            train_metric=train_metric,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            mixup=mixup,
            mixup_epoch_tail=mixup_epoch_tail,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
            num_epochs=num_epochs,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        if lp_saver is not None:
            # record metrics and (possibly) save a checkpoint for this epoch
            lp_saver_kwargs = {"net": net, "trainer": trainer}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            # normalize single-metric results to lists for uniform handling
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [train_loss, trainer.learning_rate]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Entry point for CIFAR training: parse args, seed RNGs, set up logging,
    contexts, model, data sources, trainer and checkpoint saver, then train.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(use_imgrec=ds_metainfo.use_imgrec)
    # imgrec-backed sources expose the sample count in metainfo instead of __len__
    num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        # checkpoint/metric logger; disabled when no save dir or interval given
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(args.dataset.lower(), args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        dtype=args.dtype,
        net=net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        mixup=args.mixup,
        mixup_epoch_tail=args.mixup_epoch_tail,
        label_smoothing=args.label_smoothing,
        num_classes=num_classes,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        val_metric=get_composite_metric(ds_metainfo.val_metric_names),
        train_metric=get_composite_metric(ds_metainfo.train_metric_names),
        opt_metric_name=ds_metainfo.val_metric_names[ds_metainfo.saver_acc_ind],
        ctx=ctx)
# Script entry point.
if __name__ == "__main__":
    main()
| 22,007 | 31.798808 | 119 | py |
imgclsmob | imgclsmob-master/other/eval_pt_seg-.py | import argparse
import time
import logging
from common.logger_utils import initialize_logging
from pytorch.model_stats import measure_model
from pytorch.seg_utils import add_dataset_parser_arguments, get_test_data_loader, get_metainfo, validate1
from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count
from pytorch.metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
def parse_args():
    """
    Parse the command-line parameters for segmentation-model evaluation.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments, extended with dataset-specific options.
    """
    parser = argparse.ArgumentParser(
        description='Evaluate a model for image segmentation (PyTorch/VOC2012/ADE20K/Cityscapes/COCO)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset', type=str, default="VOC", help='dataset name. options are VOC, ADE20K, Cityscapes, COCO')
    # the dataset choice determines which extra options get registered
    args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, args.dataset)
    parser.add_argument('--model', type=str, required=True, help='type of model to use. see model_provider for options.')
    parser.add_argument('--use-pretrained', action='store_true', help='enable using pretrained model from github.')
    parser.add_argument('--resume', type=str, default='', help='resume from previously saved parameters if not None')
    parser.add_argument('--calc-flops', dest='calc_flops', action='store_true', help='calculate FLOPs')
    parser.add_argument('--calc-flops-only', dest='calc_flops_only', action='store_true', help='calculate FLOPs without quality estimation')
    parser.add_argument('--remove-module', action='store_true', help='enable if stored model has module')
    parser.add_argument('--num-gpus', type=int, default=0, help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int, help='number of preprocessing workers')
    parser.add_argument('--save-dir', type=str, default='', help='directory of saved models and log-files')
    parser.add_argument('--logging-file-name', type=str, default='train.log', help='filename of training log')
    parser.add_argument('--log-packages', type=str, default='torch, torchvision', help='list of python packages for logging')
    parser.add_argument('--log-pip-packages', type=str, default='', help='list of pip packages for logging')
    args = parser.parse_args()
    return args
def test(net,
         test_data,
         use_cuda,
         input_image_size,
         in_channels,
         num_classes,
         calc_weight_count=False,
         calc_flops=False,
         calc_flops_only=True,
         extended_log=False,
         dataset_metainfo=None):
    """
    Evaluate a segmentation network (pixel accuracy + mean IoU) and/or report
    its parameter count and FLOPs.

    When ``calc_flops_only`` is True the quality evaluation is skipped entirely.
    """
    assert (dataset_metainfo is not None)
    if not calc_flops_only:
        # build the two segmentation metrics (micro-averaged)
        metric = []
        pix_acc_macro_average = False
        metric.append(PixelAccuracyMetric(
            vague_idx=dataset_metainfo["vague_idx"],
            use_vague=dataset_metainfo["use_vague"],
            macro_average=pix_acc_macro_average))
        mean_iou_macro_average = False
        metric.append(MeanIoUMetric(
            num_classes=num_classes,
            vague_idx=dataset_metainfo["vague_idx"],
            use_vague=dataset_metainfo["use_vague"],
            bg_idx=dataset_metainfo["background_idx"],
            ignore_bg=dataset_metainfo["ignore_bg"],
            macro_average=mean_iou_macro_average))
        tic = time.time()
        accuracy_info = validate1(
            accuracy_metrics=metric,
            net=net,
            val_data=test_data,
            use_cuda=use_cuda)
        pix_acc = accuracy_info[0][1]
        mean_iou = accuracy_info[1][1]
        pix_macro = "macro" if pix_acc_macro_average else "micro"
        iou_macro = "macro" if mean_iou_macro_average else "micro"
        if extended_log:
            logging.info(
                "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), "
                "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format(
                    pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))
        else:
            logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format(
                pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))
        logging.info('Time cost: {:.4f} sec'.format(
            time.time() - tic))
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            logging.info('Model: {} trainable parameters'.format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size)
        # sanity check: FLOP counter and direct weight count must agree
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
def main():
    """
    Entry point for segmentation evaluation: parse args, set up logging,
    prepare the model and test loader, then run `test`.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    # evaluation uses batch size 1 per device
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=1)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs={"aux": False, "fixed_size": False},
        load_ignore_extra=True,
        remove_module=args.remove_module)
    # the net may be wrapped in DataParallel; look through `.module` if so
    if hasattr(net, 'module'):
        input_image_size = net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size
    else:
        input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size
    test_data = get_test_data_loader(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)
    # there must be weights to evaluate, unless only FLOPs are requested
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        num_classes=args.num_classes,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
# Script entry point.
if __name__ == '__main__':
    main()
| 7,218 | 33.706731 | 111 | py |
imgclsmob | imgclsmob-master/other/eval_gl_mch.py | """
Script for evaluating trained image matching model on MXNet/Gluon (under development).
"""
import os
import time
import logging
import argparse
import numpy as np
import mxnet as mx
from mxnet.gluon.utils import split_and_load
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_val_data_source
def add_eval_parser_arguments(parser):
    """
    Register the evaluation-specific command-line options on the given parser.

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # model selection / weights
    parser.add_argument("--model", type=str, required=True, help="type of model to use. see model_provider for options")
    parser.add_argument("--use-pretrained", action="store_true", help="enable using pretrained model from github repo")
    parser.add_argument("--dtype", type=str, default="float32", help="base data type for tensors")
    parser.add_argument("--resume", type=str, default="", help="resume from previously saved parameters")
    # FLOPs reporting
    parser.add_argument("--calc-flops", dest="calc_flops", action="store_true", help="calculate FLOPs")
    parser.add_argument("--calc-flops-only", dest="calc_flops_only", action="store_true", help="calculate FLOPs without quality estimation")
    # data pipeline / hardware
    parser.add_argument("--data-subset", type=str, default="val", help="data subset. options are val and test")
    parser.add_argument("--num-gpus", type=int, default=0, help="number of gpus to use")
    parser.add_argument("-j", "--num-data-workers", dest="num_workers", default=4, type=int, help="number of preprocessing workers")
    parser.add_argument("--batch-size", type=int, default=512, help="training batch size per device (CPU/GPU)")
    # logging
    parser.add_argument("--save-dir", type=str, default="", help="directory of saved models and log-files")
    parser.add_argument("--logging-file-name", type=str, default="train.log", help="filename of training log")
    parser.add_argument("--log-packages", type=str, default="mxnet, numpy", help="list of python packages for logging")
    parser.add_argument("--log-pip-packages", type=str, default="mxnet-cu100", help="list of pip packages for logging")
    # misc
    parser.add_argument("--disable-cudnn-autotune", action="store_true", help="disable cudnn autotune for segmentation models")
    parser.add_argument("--show-progress", action="store_true", help="show progress bar")
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image matching (Gluon/HPatches)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--dataset", type=str, default="HPatches", help="dataset name")
    parser.add_argument("--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"),
                        help="path to working directory only for dataset root path preset")
    # first pass: the chosen dataset determines its own extra options
    known_args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(parser=parser, work_dir_path=known_args.work_dir)
    add_eval_parser_arguments(parser)
    # second pass: full parse with every option registered
    args = parser.parse_args()
    return args
def warp_keypoints(keypoints, H):
    """
    Warp 2D points with a homography.

    Parameters:
    ----------
    keypoints : np.ndarray
        Points to warp, shape (n, 2).
    H : np.ndarray
        Homography matrix, shape (3, 3) or batched (1, 3, 3).

    Returns:
    -------
    np.ndarray
        Warped points, shape (n, 2).

    NOTE: the original implementation did
    ``np.dot(points, np.transpose(H)).squeeze(axis=2)``, which raises for a
    plain (3, 3) H and, for a batched (1, 3, 3) H, np.transpose reverses ALL
    axes so the points were silently multiplied by H instead of H^T. Flattening
    H to (3, 3) first restores the intended p @ H^T transform for both shapes.
    """
    H = np.asarray(H).reshape(3, 3)
    num_points = keypoints.shape[0]
    homogeneous_points = np.concatenate([keypoints, np.ones((num_points, 1))], axis=1)
    warped_points = np.dot(homogeneous_points, np.transpose(H))
    # perspective division back to inhomogeneous coordinates
    return warped_points[:, :2] / warped_points[:, 2:]
def keep_true_keypoints(points, H, shape):
    """
    Keep only the (y, x, conf) keypoints whose location, warped by H, lands
    inside an image of the given (height, width) shape.
    """
    # warp in (x, y) order, then swap the result back to (y, x)
    projected = warp_keypoints(points[:, [1, 0]], H)
    projected[:, [0, 1]] = projected[:, [1, 0]]
    inside = (projected[:, 0] >= 0) & (projected[:, 0] < shape[0]) \
        & (projected[:, 1] >= 0) & (projected[:, 1] < shape[1])
    return points[inside, :]
def filter_keypoints(points, shape):
    """Keep only keypoints lying inside an image of the given (height, width)."""
    rows = points[:, 0]
    cols = points[:, 1]
    inside = (rows >= 0) & (rows < shape[0]) & (cols >= 0) & (cols < shape[1])
    return points[inside, :]
def select_k_best(conf_pts,
                  max_count=300):
    """
    Return the coordinates of the (at most) `max_count` highest-confidence
    points. Rows of `conf_pts` are (y, x, confidence); the result keeps
    ascending-confidence order, confidence column dropped.
    """
    order = conf_pts[:, 2].argsort()
    ranked_pts = conf_pts[order, :2]
    keep = min(max_count, conf_pts.shape[0])
    return ranked_pts[-keep:, :]
def calc_repeatability_np(src_pts,
                          src_confs,
                          dst_conf_pts,
                          homography,
                          src_shape,
                          dst_shape):
    """
    Compute detector repeatability between two images related by a homography.

    src_pts are (y, x) detections in the first image with confidences
    src_confs; dst_conf_pts are (y, x, conf) detections in the second image.
    Returns (n1, n2, repeatability) where n1/n2 are the kept top-k point
    counts and repeatability is the fraction matched within 3 px.
    """
    distance_thresh = 3
    # second-image detections whose back-warp lands inside the first image
    filtered_warped_keypoints = keep_true_keypoints(dst_conf_pts, np.linalg.inv(homography), src_shape)
    # first-image detections warped forward into the second image (x,y order),
    # re-assembled as (y, x, conf) rows
    true_warped_keypoints = warp_keypoints(src_pts[:, [1, 0]], homography)
    true_warped_keypoints = np.stack([true_warped_keypoints[:, 1], true_warped_keypoints[:, 0], src_confs], axis=-1)
    true_warped_keypoints = filter_keypoints(true_warped_keypoints, dst_shape)
    # keep only the top-confidence points on both sides
    filtered_warped_keypoints = select_k_best(filtered_warped_keypoints)
    true_warped_keypoints = select_k_best(true_warped_keypoints)
    n1 = true_warped_keypoints.shape[0]
    n2 = filtered_warped_keypoints.shape[0]
    # pairwise distance matrix (n1 x n2) via broadcasting
    true_warped_keypoints = np.expand_dims(true_warped_keypoints, 1)
    filtered_warped_keypoints = np.expand_dims(filtered_warped_keypoints, 0)
    norm = np.linalg.norm(true_warped_keypoints - filtered_warped_keypoints, ord=None, axis=2)
    count1 = 0
    count2 = 0
    if n2 != 0:
        min1 = np.min(norm, axis=1)
        count1 = np.sum(min1 <= distance_thresh)
    if n1 != 0:
        min2 = np.min(norm, axis=0)
        count2 = np.sum(min2 <= distance_thresh)
    # symmetric repeatability: matched points over total points on both sides
    if n1 + n2 > 0:
        repeatability = (count1 + count2) / (n1 + n2)
    else:
        repeatability = 0
    return n1, n2, repeatability
def batch_fn(batch, ctx):
    """
    Split a (source image, destination image, label) batch across contexts.
    Returns the three per-context lists in the same order.
    """
    data_src, data_dst, label = (
        split_and_load(part, ctx_list=ctx, batch_axis=0) for part in batch[:3])
    return data_src, data_dst, label
def calc_detector_repeatability(test_data,
                                net,
                                ctx):
    """
    Measure keypoint detector repeatability over the whole data source and
    log the aggregated statistics.

    Parameters:
    ----------
    test_data : DataLoader
        Data source yielding (source image, destination image, homography).
    net : HybridBlock
        Detector network returning (points, confidences, descriptor map).
    ctx : list of Context
        MXNet contexts to run on.
    """
    start_time = time.time()
    repeatabilities = []
    n1s = []
    n2s = []
    for batch in test_data:
        src_images, dst_images, homographies = batch_fn(batch, ctx)
        src_outputs = [net(x) for x in src_images]
        dst_outputs = [net(x) for x in dst_images]
        for src_img, dst_img, homography_nd, src_out, dst_out in zip(
                src_images, dst_images, homographies, src_outputs, dst_outputs):
            homography = homography_nd.asnumpy()
            # Spatial sizes (height, width) of both images.
            src_shape = src_img.shape[2:]
            dst_shape = dst_img.shape[2:]
            src_pts, src_confs, _ = src_out
            dst_pts, dst_confs, _ = dst_out
            src_pts_np = src_pts[0].asnumpy()
            src_confs_np = src_confs[0].asnumpy()
            # Destination points and confidences fused into one (M, 3) array.
            dst_conf_pts = mx.nd.concat(dst_pts[0], dst_confs[0].reshape(shape=(-1, 1)), dim=1).asnumpy()
            n1, n2, repeatability = calc_repeatability_np(
                src_pts_np,
                src_confs_np,
                dst_conf_pts,
                homography,
                src_shape,
                dst_shape)
            n1s.append(n1)
            n2s.append(n2)
            repeatabilities.append(repeatability)
    logging.info("Average number of points in the first image: {}".format(np.mean(n1s)))
    logging.info("Average number of points in the second image: {}".format(np.mean(n2s)))
    logging.info("The repeatability: {:.4f}".format(np.mean(repeatabilities)))
    logging.info("Time cost: {:.4f} sec".format(time.time() - start_time))
def main():
    """
    Main body of script.
    """
    args = parse_args()
    # Disable MXNet cuDNN convolution autotuning.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    # Repeatability is computed per image pair, so only batch size 1 is supported.
    assert (args.batch_size == 1)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=False,
        classes=args.classes,
        in_channels=args.in_channels,
        do_hybridize=False,
        ctx=ctx)
    test_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    calc_detector_repeatability(
        test_data=test_data,
        net=net,
        ctx=ctx)
if __name__ == "__main__":
    main()
| 9,800 | 30.213376 | 116 | py |
imgclsmob | imgclsmob-master/other/eval_gl_seg-.py | import os
import argparse
import time
import logging
import mxnet as mx
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model, calc_net_weight_count
from gluon.model_stats import measure_model
from gluon.seg_utils1 import add_dataset_parser_arguments, get_metainfo
from gluon.seg_utils1 import batch_fn
from gluon.seg_utils1 import get_test_data_source
from gluon.seg_utils1 import validate1
from gluon.metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
def add_eval_seg_parser_arguments(parser):
    """
    Add evaluation-specific command-line arguments to the parser.

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    arg_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options.")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from gluon.")),
        (("--dtype",),
         dict(type=str, default="float32",
              help="data type for training. default is float32")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--calc-flops",),
         dict(dest="calc_flops", action="store_true",
              help="calculate FLOPs")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--num-gpus",),
         dict(type=int, default=0,
              help="number of gpus to use.")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="mxnet",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="mxnet-cu92, mxnet-cu100mkl, gluoncv",
              help="list of pip packages for logging")),
    ]
    # A data-driven table keeps the long option list compact and uniform.
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
def parse_args():
    """
    Parse command-line parameters for the segmentation evaluation script.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image segmentation (Gluon/VOC2012/ADE20K/Cityscapes/COCO)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="VOC",
        help="dataset name. options are VOC, ADE20K, Cityscapes, COCO")
    # The dataset name must be known first: the remaining arguments depend on it.
    known_args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, known_args.dataset)
    add_eval_seg_parser_arguments(parser)
    return parser.parse_args()
def test(net,
         test_data,
         data_source_needs_reset,
         dtype,
         ctx,
         input_image_size,
         in_channels,
         classes,
         calc_weight_count=False,
         calc_flops=False,
         calc_flops_only=True,
         extended_log=False,
         dataset_metainfo=None):
    """
    Calculate segmentation quality metrics (pixel accuracy, mean IoU) and/or
    model statistics (parameter count, FLOPs/MACs) and log the results.

    Parameters:
    ----------
    net : HybridBlock
        Model to evaluate.
    test_data : DataLoader
        Data source with the test subset.
    data_source_needs_reset : bool
        Whether the data source requires a reset between passes.
    dtype : str
        Base data type for tensors.
    ctx : list of Context
        MXNet contexts.
    input_image_size : tuple of 2 int
        Spatial size of the input image (used for FLOPs estimation).
    in_channels : int
        Number of input channels.
    classes : int
        Number of segmentation classes.
    calc_weight_count : bool, default False
        Whether to calculate and log the number of trainable parameters.
    calc_flops : bool, default False
        Whether to estimate FLOPs/MACs.
    calc_flops_only : bool, default True
        Whether to skip quality estimation and only measure the model.
    extended_log : bool, default False
        Whether to log metric values with full precision as well.
    dataset_metainfo : dict, default None
        Dataset-specific settings (vague/background class handling). Required.
    """
    assert (dataset_metainfo is not None)
    if not calc_flops_only:
        metric = mx.metric.CompositeEvalMetric()
        # Micro-averaging for both metrics (the flags are kept for easy switching).
        pix_acc_macro_average = False
        metric.add(PixelAccuracyMetric(
            vague_idx=dataset_metainfo["vague_idx"],
            use_vague=dataset_metainfo["use_vague"],
            macro_average=pix_acc_macro_average))
        mean_iou_macro_average = False
        metric.add(MeanIoUMetric(
            num_classes=classes,
            vague_idx=dataset_metainfo["vague_idx"],
            use_vague=dataset_metainfo["use_vague"],
            bg_idx=dataset_metainfo["background_idx"],
            ignore_bg=dataset_metainfo["ignore_bg"],
            macro_average=mean_iou_macro_average))
        tic = time.time()
        accuracy_info = validate1(
            accuracy_metric=metric,
            net=net,
            val_data=test_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        pix_acc = accuracy_info[1][0]
        mean_iou = accuracy_info[1][1]
        pix_macro = "macro" if pix_acc_macro_average else "micro"
        iou_macro = "macro" if mean_iou_macro_average else "micro"
        if extended_log:
            logging.info(
                "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), "
                "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format(
                    pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))
        else:
            logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format(
                pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            # When FLOPs are requested the count is logged as part of stat_msg below.
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size, ctx[0])
        # Cross-check the two independent parameter counters when both ran.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
def main():
    """
    Main body of script: parse parameters, prepare the model and data source,
    and run the segmentation evaluation.
    """
    # Disable MXNet cuDNN convolution autotuning.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    # Evaluation runs with batch size 1 regardless of the GPU count.
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=1)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs={"aux": False, "fixed_size": False},
        load_ignore_extra=True,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=False,
        ctx=ctx)
    # Fallback input size (480, 480) is only used for FLOPs estimation.
    input_image_size = net.in_size if hasattr(net, 'in_size') else (480, 480)
    test_data = get_test_data_source(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)
    # Evaluation needs trained weights unless only FLOPs are being counted.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        data_source_needs_reset=False,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        classes=args.num_classes,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
if __name__ == '__main__':
    main()
| 7,626 | 32.897778 | 111 | py |
imgclsmob | imgclsmob-master/other/eval_pt_cifar-.py | import argparse
import time
import logging
from common.logger_utils import initialize_logging
from pytorch.model_stats import measure_model
from pytorch.cifar1 import add_dataset_parser_arguments, get_val_data_loader
from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count, validate1, AverageMeter
def parse_args():
    """
    Parse command-line parameters for the CIFAR/SVHN evaluation script.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (PyTorch/CIFAR/SVHN)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="CIFAR10",
        help="dataset name. options are CIFAR10, CIFAR100, and SVHN")
    # Dataset-specific options depend on the chosen dataset, so the dataset
    # name is pre-parsed before registering the rest.
    known_args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, known_args.dataset)
    arg_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options.")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github.")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--calc-flops",),
         dict(dest="calc_flops", action="store_true",
              help="calculate FLOPs")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--remove-module",),
         dict(action="store_true",
              help="enable if stored model has module")),
        (("--num-gpus",),
         dict(type=int, default=0,
              help="number of gpus to use.")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU).")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="torch, torchvision",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="",
              help="list of pip packages for logging")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def test(net,
         val_data,
         use_cuda,
         input_image_size,
         in_channels,
         calc_weight_count=False,
         calc_flops=False,
         calc_flops_only=True,
         extended_log=False):
    """
    Calculate the classification error and/or model statistics
    (parameter count, FLOPs/MACs) and log the results.

    Parameters:
    ----------
    net : Module
        Model to evaluate.
    val_data : DataLoader
        Data source with the validation subset.
    use_cuda : bool
        Whether to run on GPU.
    input_image_size : tuple of 2 int
        Spatial size of the input image (used for FLOPs estimation).
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate and log the number of trainable parameters.
    calc_flops : bool, default False
        Whether to estimate FLOPs/MACs.
    calc_flops_only : bool, default True
        Whether to skip the quality estimation and only measure the model.
    extended_log : bool, default False
        Whether to log the error with full precision as well.
    """
    if not calc_flops_only:
        accuracy_metric = AverageMeter()
        tic = time.time()
        err_val = validate1(
            accuracy_metric=accuracy_metric,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        if extended_log:
            logging.info('Test: err={err:.4f} ({err})'.format(
                err=err_val))
        else:
            logging.info('Test: err={err:.4f}'.format(
                err=err_val))
        logging.info('Time cost: {:.4f} sec'.format(
            time.time() - tic))
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            # When FLOPs are requested the count is logged as part of stat_msg below.
            logging.info('Model: {} trainable parameters'.format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size)
        # Cross-check the two independent parameter counters when both ran.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
def main():
    """
    Main body of script: parse parameters, prepare the model and data loader,
    and run the classification evaluation.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        remove_module=args.remove_module)
    if hasattr(net, 'module'):
        # DataParallel-style wrappers keep the real model in `.module`.
        input_image_size = net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size
    else:
        input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size
    val_data = get_val_data_loader(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)
    # Evaluation needs trained weights unless only FLOPs are being counted.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
if __name__ == '__main__':
    main()
| 5,894 | 30.524064 | 107 | py |
imgclsmob | imgclsmob-master/other/eval_pt_mch.py | """
Script for evaluating trained image matching model on PyTorch (under development).
"""
import os
import time
import logging
import argparse
import numpy as np
import torch
from common.logger_utils import initialize_logging
from pytorch.utils import prepare_pt_context, prepare_model
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_val_data_source
from pytorch.metrics.ret_metrics import PointDescriptionMatchRatio
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    arg_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--dtype",),
         dict(type=str, default="float32",
              help="base data type for tensors")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters")),
        (("--calc-flops",),
         dict(dest="calc_flops", action="store_true",
              help="calculate FLOPs")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--data-subset",),
         dict(type=str, default="val",
              help="data subset. options are val and test")),
        (("--num-gpus",),
         dict(type=int, default=0,
              help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="mxnet, numpy",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="mxnet-cu100",
              help="list of pip packages for logging")),
        (("--disable-cudnn-autotune",),
         dict(action="store_true",
              help="disable cudnn autotune for segmentation models")),
        (("--show-progress",),
         dict(action="store_true",
              help="show progress bar")),
    ]
    # A data-driven table keeps the long option list compact and uniform.
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image matching (PyTorch/HPatches)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="HPatches",
        help="dataset name")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # The dataset name and working directory must be known before the
    # dataset-specific arguments can be registered.
    known_args, _ = parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=known_args.work_dir)
    add_eval_parser_arguments(parser)
    return parser.parse_args()
class SuperPointFrontend(object):
    """
    Wrapper around a SuperPoint-style pytorch net to help with pre and post
    image processing: heatmap thresholding, non-maximum suppression, border
    removal and descriptor sampling.
    """
    def __init__(self,
                 nms_dist=4,
                 conf_thresh=0.015,
                 nn_thresh=0.7,
                 cuda=True):
        """
        Parameters:
        ----------
        nms_dist : int, default 4
            Non-maximum suppression radius (infinity norm, in pixels).
        conf_thresh : float, default 0.015
            Keypoint confidence threshold applied to the heatmap.
        nn_thresh : float, default 0.7
            L2 descriptor distance threshold for a good match.
        cuda : bool, default True
            NOTE(review): accepted but never stored or used — `run` moves
            sampling points to GPU unconditionally; confirm intended.
        """
        self.nms_dist = nms_dist
        self.conf_thresh = conf_thresh
        self.nn_thresh = nn_thresh # L2 descriptor distance for good match.
        self.cell = 8 # Size of each output cell. Keep this fixed.
        self.border_remove = 4 # Remove points this close to the border.
    def nms_fast(self, in_corners, H, W, dist_thresh):
        """
        Run a faster approximate Non-Max-Suppression on numpy corners shaped:
          3xN [x_i,y_i,conf_i]^T
        Algo summary: Create a grid sized HxW. Assign each corner location a 1, rest
        are zeros. Iterate through all the 1's and convert them either to -1 or 0.
        Suppress points by setting nearby values to 0.
        Grid Value Legend:
        -1 : Kept.
         0 : Empty or suppressed.
         1 : To be processed (converted to either kept or supressed).
        NOTE: The NMS first rounds points to integers, so NMS distance might not
        be exactly dist_thresh. It also assumes points are within image boundaries.
        Inputs
          in_corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
          H - Image height.
          W - Image width.
          dist_thresh - Distance to suppress, measured as an infinty norm distance.
        Returns:
          nmsed_corners - 3xN numpy matrix with surviving corners.
          nmsed_inds - N length numpy vector with surviving corner indices.
        """
        grid = np.zeros((H, W)).astype(int) # Track NMS data.
        inds = np.zeros((H, W)).astype(int) # Store indices of points.
        # Sort by confidence and round to nearest int.
        inds1 = np.argsort(-in_corners[2, :])
        corners = in_corners[:, inds1]
        rcorners = corners[:2, :].round().astype(int) # Rounded corners.
        # Check for edge case of 0 or 1 corners.
        if rcorners.shape[1] == 0:
            return np.zeros((3, 0)).astype(int), np.zeros(0).astype(int)
        if rcorners.shape[1] == 1:
            out = np.vstack((rcorners, in_corners[2])).reshape(3, 1)
            return out, np.zeros((1)).astype(int)
        # Initialize the grid.
        for i, rc in enumerate(rcorners.T):
            grid[rcorners[1, i], rcorners[0, i]] = 1
            inds[rcorners[1, i], rcorners[0, i]] = i
        # Pad the border of the grid, so that we can NMS points near the border.
        pad = dist_thresh
        grid = np.pad(grid, ((pad, pad), (pad, pad)), mode='constant')
        # Iterate through points, highest to lowest conf, suppress neighborhood.
        count = 0
        for i, rc in enumerate(rcorners.T):
            # Account for top and left padding.
            pt = (rc[0] + pad, rc[1] + pad)
            if grid[pt[1], pt[0]] == 1: # If not yet suppressed.
                # Zero the whole (2*pad+1)^2 neighborhood, then mark the center kept.
                grid[pt[1] - pad:pt[1] + pad + 1, pt[0] - pad:pt[0] + pad + 1] = 0
                grid[pt[1], pt[0]] = -1
                count += 1
        # Get all surviving -1's and return sorted array of remaining corners.
        keepy, keepx = np.where(grid == -1)
        keepy, keepx = keepy - pad, keepx - pad
        inds_keep = inds[keepy, keepx]
        out = corners[:, inds_keep]
        values = out[-1, :]
        inds2 = np.argsort(-values)
        out = out[:, inds2]
        out_inds = inds1[inds_keep[inds2]]
        return out, out_inds
    def run(self, net, img):
        """ Process an image tensor to extract points and descriptors.
        NOTE(review): despite the commented-out asserts below, `img` here is a
        batched 4D tensor (indexed as img.shape[2], img.shape[3]) — the
        historical HxW-numpy interface no longer applies; confirm callers.
        Input
          img - input image tensor, shape (1, 1, H, W), values in range [0,1].
        Output
          corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
          desc - 256xN numpy array of corresponding unit normalized descriptors.
          heatmap - HxW numpy heatmap in range [0,1] of point confidences.
        """
        import torch.nn as nn
        # assert img.ndim == 2, 'Image must be grayscale.'
        # assert img.dtype == np.float32, 'Image must be float32.'
        # H, W = img.shape[0], img.shape[1]
        # in_channels = img.copy()
        # in_channels = (in_channels.reshape(1, H, W))
        # in_channels = torch.from_numpy(in_channels)
        # in_channels = torch.autograd.Variable(in_channels).view(1, 1, H, W)
        # if self.cuda:
        #     in_channels = in_channels.cuda()
        inp = img
        H, W = img.shape[2], img.shape[3]
        # Forward pass of network.
        outs = net.forward(inp)
        semi, coarse_desc = outs[0], outs[1]
        # Convert pytorch -> numpy.
        semi = semi.data.cpu().numpy().squeeze()
        # --- Process points.
        dense = np.exp(semi) # Softmax.
        dense = dense / (np.sum(dense, axis=0) + .00001) # Should sum to 1.
        # Remove dustbin.
        nodust = dense[:-1, :, :]
        # Reshape to get full resolution heatmap.
        Hc = int(H / self.cell)
        Wc = int(W / self.cell)
        nodust = nodust.transpose(1, 2, 0)
        heatmap = np.reshape(nodust, [Hc, Wc, self.cell, self.cell])
        heatmap = np.transpose(heatmap, [0, 2, 1, 3])
        heatmap = np.reshape(heatmap, [Hc * self.cell, Wc * self.cell])
        xs, ys = np.where(heatmap >= self.conf_thresh) # Confidence threshold.
        if len(xs) == 0:
            return np.zeros((3, 0)), None, None
        pts = np.zeros((3, len(xs))) # Populate point data sized 3xN.
        pts[0, :] = ys
        pts[1, :] = xs
        pts[2, :] = heatmap[xs, ys]
        pts, _ = self.nms_fast(pts, H, W, dist_thresh=self.nms_dist) # Apply NMS.
        inds = np.argsort(pts[2, :])
        pts = pts[:, inds[::-1]] # Sort by confidence.
        # Remove points along border.
        bord = self.border_remove
        toremoveW = np.logical_or(pts[0, :] < bord, pts[0, :] >= (W - bord))
        toremoveH = np.logical_or(pts[1, :] < bord, pts[1, :] >= (H - bord))
        toremove = np.logical_or(toremoveW, toremoveH)
        pts = pts[:, ~toremove]
        # --- Process descriptor.
        D = coarse_desc.shape[1]
        if pts.shape[1] == 0:
            desc = np.zeros((D, 0))
        else:
            # Interpolate into descriptor map using 2D point locations.
            # Coordinates are normalized to [-1, 1] as required by grid_sample.
            samp_pts = torch.from_numpy(pts[:2, :].copy())
            samp_pts[0, :] = (samp_pts[0, :] / (float(W) / 2.)) - 1.
            samp_pts[1, :] = (samp_pts[1, :] / (float(H) / 2.)) - 1.
            samp_pts = samp_pts.transpose(0, 1).contiguous()
            samp_pts = samp_pts.view(1, 1, -1, 2)
            samp_pts = samp_pts.float()
            # if self.cuda:
            #     samp_pts = samp_pts.cuda()
            # NOTE(review): unconditionally moves to GPU; fails on CPU-only runs.
            samp_pts = samp_pts.cuda()
            desc = nn.functional.grid_sample(coarse_desc, samp_pts)
            desc = desc.data.cpu().numpy().reshape(D, -1)
            # L2-normalize each descriptor column.
            desc /= np.linalg.norm(desc, axis=0)[np.newaxis, :]
        return pts, desc, heatmap
def warp_keypoints(src_pts, homography):
src_hmg_pts = np.concatenate([src_pts, np.ones((src_pts.shape[0], 1))], axis=1)
dst_hmg_pts = np.dot(src_hmg_pts, np.transpose(homography)).squeeze(axis=2)
dst_pts = dst_hmg_pts[:, :2] / dst_hmg_pts[:, 2:]
return dst_pts
def calc_filter_mask(pts, shape):
    """
    Boolean mask of points lying inside an image of size `shape`
    (height, width); column 0 is compared against the height,
    column 1 against the width.
    """
    inside_h = (pts[:, 0] >= 0) & (pts[:, 0] < shape[0])
    inside_w = (pts[:, 1] >= 0) & (pts[:, 1] < shape[1])
    return inside_h & inside_w
def select_k_best(pts,
                  confs,
                  max_count=300):
    """
    Select up to `max_count` points with the highest confidences.
    Both returned arrays are ordered by descending confidence.
    """
    # Keep argsort()[::-1] (rather than argsort(-confs)) to preserve the
    # original tie-breaking order exactly.
    top_inds = np.argsort(confs)[::-1][:max_count]
    return pts[top_inds, :], confs[top_inds]
def calc_repeatability_np(src_pts,
                          src_confs,
                          dst_pts,
                          dst_confs,
                          homography,
                          src_shape,
                          dst_shape,
                          distance_thresh=3):
    """
    Compute detector repeatability for one image pair related by a homography.

    Parameters:
    ----------
    src_pts : np.array
        Source-image keypoints, (N, 2).
    src_confs : np.array
        Confidences of the source keypoints, (N,).
    dst_pts : np.array
        Destination-image keypoints, (M, 2).
    dst_confs : np.array
        Confidences of the destination keypoints, (M,).
    homography : np.array
        Ground-truth homography mapping source to destination coordinates.
    src_shape : tuple of 2 int
        (height, width) of the source image.
    dst_shape : tuple of 2 int
        (height, width) of the destination image.
    distance_thresh : int, default 3
        Match distance threshold in pixels.

    Returns:
    -------
    n_pred : int
        Number of kept warped source keypoints.
    n_label : int
        Number of kept destination keypoints visible in the source image.
    repeatability : float
        Fraction of keypoints re-detected within `distance_thresh`.
    """
    # Destination points that land inside the source image when warped back.
    back_warped = warp_keypoints(dst_pts, np.linalg.inv(homography))
    keep_dst = calc_filter_mask(back_warped, src_shape)
    gt_pts, gt_confs = dst_pts[keep_dst, :], dst_confs[keep_dst]
    # Source points warped into the destination image.
    fwd_warped = warp_keypoints(src_pts, homography)
    keep_src = calc_filter_mask(fwd_warped, dst_shape)
    warped_pts, warped_confs = fwd_warped[keep_src, :], src_confs[keep_src]
    # Keep only the most confident points on both sides.
    gt_pts, gt_confs = select_k_best(gt_pts, gt_confs)
    warped_pts, warped_confs = select_k_best(warped_pts, warped_confs)
    n_pred = warped_pts.shape[0]
    n_label = gt_pts.shape[0]
    # NOTE: confidence is appended as a third coordinate, so it participates
    # in the pairwise distance below — this mirrors the reference
    # SuperPoint evaluation code.
    gt_pts = np.stack([gt_pts[:, 0], gt_pts[:, 1], gt_confs], axis=1)
    warped_pts = np.stack([warped_pts[:, 0], warped_pts[:, 1], warped_confs], axis=1)
    # Pairwise distance matrix of shape (n_pred, n_label) via broadcasting.
    norm = np.linalg.norm(
        np.expand_dims(warped_pts, 1) - np.expand_dims(gt_pts, 0),
        ord=None,
        axis=2)
    count1 = 0
    count2 = 0
    if n_label != 0:
        count1 = np.sum(np.min(norm, axis=1) <= distance_thresh)
    if n_pred != 0:
        count2 = np.sum(np.min(norm, axis=0) <= distance_thresh)
    if n_pred + n_label > 0:
        repeatability = (count1 + count2) / (n_pred + n_label)
    else:
        repeatability = 0
    return n_pred, n_label, repeatability
def calc_detector_repeatability(test_data,
                                net,
                                use_cuda):
    """
    Evaluate keypoint detection/description over a data source of image pairs
    related by known homographies, and log aggregated statistics.

    Parameters:
    ----------
    test_data : DataLoader
        Data source yielding (source image, destination image, homography).
    net : Module
        Network returning (points, confidences, descriptor map) per image.
    use_cuda : bool
        Whether to move input tensors onto GPU.
    """
    start_time = time.time()
    repeatabilities = []
    n1s = []
    n2s = []
    # NOTE(review): only the description metric is updated below; the
    # repeatability/point-count lists stay empty, so the means logged at the
    # end evaluate to NaN — presumably leftover scaffolding; confirm.
    desc_metric = PointDescriptionMatchRatio(pts_max_count=10)
    desc_metric.reset()
    with torch.no_grad():
        for data_src, data_dst, target in test_data:
            if use_cuda:
                data_src = data_src.cuda(non_blocking=True)
                data_dst = data_dst.cuda(non_blocking=True)
            src_pts, src_confs, src_desc_map = net(data_src)
            dst_pts, dst_confs, dst_desc_map = net(data_dst)
            # Spatial sizes (height, width) of both images.
            src_shape = data_src.cpu().detach().numpy().shape[2:]
            dst_shape = data_dst.cpu().detach().numpy().shape[2:]
            desc_metric.update_alt(
                homography=target[0],
                src_pts=src_pts[0],
                dst_pts=dst_pts[0],
                src_descs=src_desc_map[0],
                dst_descs=dst_desc_map[0],
                src_img_size=src_shape,
                dst_img_size=dst_shape)
    logging.info("Average number of points in the first image: {}".format(np.mean(n1s)))
    logging.info("Average number of points in the second image: {}".format(np.mean(n2s)))
    logging.info("The repeatability: {:.4f}".format(np.mean(repeatabilities)))
    logging.info("Time cost: {:.4f} sec".format(time.time() - start_time))
def main():
    """
    Main body of script.
    """
    args = parse_args()
    # NOTE(review): this is a PyTorch script, yet an MXNet environment
    # variable is set here — looks copied from the Gluon variant; confirm.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    # Repeatability is computed per image pair, so only batch size 1 is supported.
    assert (args.batch_size == 1)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=False,
        num_classes=args.classes,
        in_channels=args.in_channels,
        remove_module=False)
    test_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    calc_detector_repeatability(
        test_data=test_data,
        net=net,
        use_cuda=use_cuda)
if __name__ == "__main__":
    main()
| 19,664 | 35.620112 | 98 | py |
imgclsmob | imgclsmob-master/other/eval_pt_cub-.py | import argparse
import time
import logging
from common.logger_utils import initialize_logging
from pytorch.model_stats import measure_model
from pytorch.cub200_2011_utils1 import add_dataset_parser_arguments, get_val_data_loader
from pytorch.utils import prepare_pt_context, prepare_model, calc_net_weight_count, AverageMeter
# from pytorch.utils import validate
from pytorch.utils import validate1
def parse_args():
    """
    Parse command-line parameters for the CUB-200-2011 evaluation script.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (PyTorch/CUB-200-2011)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Single fixed dataset, so no pre-parsing of a dataset name is needed.
    add_dataset_parser_arguments(parser)
    arg_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options.")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github.")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--calc-flops",),
         dict(dest="calc_flops", action="store_true",
              help="calculate FLOPs")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--remove-module",),
         dict(action="store_true",
              help="enable if stored model has module")),
        (("--num-gpus",),
         dict(type=int, default=0,
              help="number of gpus to use.")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=32,
              help="training batch size per device (CPU/GPU).")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="torch, torchvision",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="",
              help="list of pip packages for logging")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
# def test(net,
# val_data,
# use_cuda,
# input_image_size,
# in_channels,
# calc_weight_count=False,
# calc_flops=False,
# calc_flops_only=True,
# extended_log=False):
# if not calc_flops_only:
# acc_top1 = AverageMeter()
# acc_top5 = AverageMeter()
# tic = time.time()
# err_top1_val, err_top5_val = validate(
# acc_top1=acc_top1,
# acc_top5=acc_top5,
# net=net,
# val_data=val_data,
# use_cuda=use_cuda)
# if extended_log:
# logging.info('Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'.format(
# top1=err_top1_val, top5=err_top5_val))
# else:
# logging.info('Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'.format(
# top1=err_top1_val, top5=err_top5_val))
# logging.info('Time cost: {:.4f} sec'.format(
# time.time() - tic))
#
# if calc_weight_count:
# weight_count = calc_net_weight_count(net)
# if not calc_flops:
# logging.info('Model: {} trainable parameters'.format(weight_count))
# if calc_flops:
# num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size)
# assert (not calc_weight_count) or (weight_count == num_params)
# stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
# " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
# logging.info(stat_msg.format(
# params=num_params, params_m=num_params / 1e6,
# flops=num_flops, flops_m=num_flops / 1e6,
# flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
# macs=num_macs, macs_m=num_macs / 1e6))
def test(net,
         val_data,
         use_cuda,
         input_image_size,
         in_channels,
         calc_weight_count=False,
         calc_flops=False,
         calc_flops_only=True,
         extended_log=False):
    """
    Evaluate the model and/or report its complexity.

    Parameters:
    ----------
    net : Module
        Network to evaluate.
    val_data : DataLoader
        Validation data source.
    use_cuda : bool
        Whether to run evaluation on GPU.
    input_image_size : tuple of 2 int
        Spatial size of the model input (used for FLOPs measurement).
    in_channels : int
        Number of input channels (used for FLOPs measurement).
    calc_weight_count : bool, default False
        Whether to report the number of trainable parameters.
    calc_flops : bool, default False
        Whether to measure FLOPs/MACs.
    calc_flops_only : bool, default True
        When True, skip the accuracy evaluation entirely.
    extended_log : bool, default False
        Whether to log the error value in extended (raw + formatted) form.
    """
    if not calc_flops_only:
        start_time = time.time()
        error_value = validate1(
            accuracy_metric=AverageMeter(),
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        # Extended form repeats the raw value next to the rounded one.
        template = 'Test: err={err:.4f} ({err})' if extended_log else 'Test: err={err:.4f}'
        logging.info(template.format(err=error_value))
        logging.info('Time cost: {:.4f} sec'.format(
            time.time() - start_time))
    param_count = None
    if calc_weight_count:
        param_count = calc_net_weight_count(net)
        if not calc_flops:
            # When FLOPs are measured too, the count is reported together with them below.
            logging.info('Model: {} trainable parameters'.format(param_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size)
        # Sanity check: both counting methods must agree when both were run.
        assert (not calc_weight_count) or (param_count == num_params)
        msg_template = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                       " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(msg_template.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
def main():
    """
    Script entry point: parse CLI arguments, set up logging, build the model and
    validation data loader, then run the evaluation / complexity measurement.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        remove_module=args.remove_module)
    # Prefer the model's own declared input size when available; `net` may be
    # wrapped (exposes `.module` -- presumably nn.DataParallel), so check both.
    if hasattr(net, 'module'):
        input_image_size = net.module.in_size[0] if hasattr(net.module, 'in_size') else args.input_size
    else:
        input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size
    val_data = get_val_data_loader(
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    # Evaluating random weights only makes sense when merely counting FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
# Run only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 7,660 | 32.748899 | 103 | py |
imgclsmob | imgclsmob-master/other/chainer_/train_ch_in1k.py | import argparse
import numpy as np
import chainer
from chainer import cuda
from chainer import training
from chainer.training import extensions
from chainer.serializers import save_npz
from common.logger_utils import initialize_logging
from chainer_.utils import prepare_model
from chainer_.imagenet1k1 import add_dataset_parser_arguments
from chainer_.imagenet1k1 import get_data_iterators
def parse_args():
    """
    Parse command line arguments for training on ImageNet-1K with Chainer.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Chainer/ImageNet-1K)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Dataset-specific options (data dir, input size, ...) come from a shared helper.
    add_dataset_parser_arguments(parser)
    # Model / checkpoint options.
    parser.add_argument(
        '--model',
        type=str,
        required=True,
        help='type of model to use. see model_provider for options.')
    parser.add_argument(
        '--use-pretrained',
        action='store_true',
        help='enable using pretrained model from gluon.')
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='resume from previously saved parameters if not None')
    parser.add_argument(
        '--resume-state',
        type=str,
        default='',
        help='resume from previously saved optimizer state if not None')
    # Hardware / data loading options.
    parser.add_argument(
        '--num-gpus',
        type=int,
        default=0,
        help='number of gpus to use.')
    parser.add_argument(
        '-j',
        '--num-data-workers',
        dest='num_workers',
        default=4,
        type=int,
        help='number of preprocessing workers')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=512,
        help='training batch size per device (CPU/GPU).')
    # Training schedule options.
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=120,
        help='number of training epochs.')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=1,
        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument(
        '--attempt',
        type=int,
        default=1,
        help='current number of training')
    # Optimizer / learning-rate schedule options.
    parser.add_argument(
        '--optimizer-name',
        type=str,
        default='nag',
        help='optimizer name')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='learning rate. default is 0.1')
    parser.add_argument(
        '--lr-mode',
        type=str,
        default='cosine',
        help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument(
        '--lr-decay',
        type=float,
        default=0.1,
        help='decay rate of learning rate. default is 0.1')
    parser.add_argument(
        '--lr-decay-period',
        type=int,
        default=0,
        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument(
        '--lr-decay-epoch',
        type=str,
        default='40,60',
        help='epoches at which learning rate decays. default is 40,60.')
    parser.add_argument(
        '--target-lr',
        type=float,
        default=1e-8,
        help='ending learning rate; default is 1e-8')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='momentum value for optimizer; default is 0.9')
    parser.add_argument(
        '--wd',
        type=float,
        default=0.0001,
        help='weight decay rate. default is 0.0001.')
    # Logging / checkpoint options.
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        help='number of batches to wait before logging.')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=4,
        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument(
        '--save-dir',
        type=str,
        default='',
        help='directory of saved models and log-files')
    parser.add_argument(
        '--logging-file-name',
        type=str,
        default='train.log',
        help='filename of training log')
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='Random seed to be fixed')
    parser.add_argument(
        '--log-packages',
        type=str,
        default='mxnet',
        help='list of python packages for logging')
    parser.add_argument(
        '--log-pip-packages',
        type=str,
        default='mxnet-cu92, cupy-cuda100, gluoncv',
        help='list of pip packages for logging')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Return the given random seed, replacing a non-positive value with a
    randomly drawn seed in [0, 10000).

    Parameters:
    ----------
    seed : int
        Requested seed; values <= 0 mean "pick one at random".

    Returns:
    -------
    int
        The effective seed.
    """
    return seed if seed > 0 else np.random.randint(10000)
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    """
    Assemble a Chainer Trainer: optimizer, updater, and standard extensions
    (evaluator, snapshots, reporting, progress bar).

    Parameters:
    ----------
    net : chainer.Link
        Network (with loss/accuracy reporting) to train.
    optimizer_name : str
        Either 'sgd' (MomentumSGD) or 'nag' (NesterovAG).
    lr : float
        Learning rate.
    momentum : float
        Momentum value.
    num_epochs : int
        Number of training epochs (stop trigger).
    train_iter : chainer.iterators iterator
        Training data iterator.
    val_iter : chainer.iterators iterator
        Validation data iterator.
    logging_dir_path : str
        Output directory for trainer artifacts.
    num_gpus : int, default 0
        Number of GPUs; only the first device (id 0) is actually used.

    Returns:
    -------
    chainer.training.Trainer
        Configured trainer.

    Raises:
    ------
    Exception
        If the optimizer name is not supported.
    """
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)
    # Single-device training only: device 0 if any GPU requested, else CPU (-1).
    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)
    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)
    # Validation/snapshot every 100k iterations, console/log report every 1k.
    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'
    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    return trainer
def save_params(file_stem,
                net,
                trainer):
    """
    Persist the model weights (<stem>.npz) and the trainer state (<stem>.states).
    """
    for extension, target in (('.npz', net), ('.states', trainer)):
        save_npz(
            file=file_stem + extension,
            obj=target)
def main():
    """
    Script entry point: parse arguments, set up logging and the GPU device,
    build the model, data iterators and trainer, then run training.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    num_gpus = args.num_gpus
    if num_gpus > 0:
        # Only the first GPU is used (see prepare_trainer's device selection).
        cuda.get_device(0).use()
    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)
    # Fall back to generic defaults when the model doesn't declare metadata.
    num_classes = net.classes if hasattr(net, 'classes') else 1000
    input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size
    train_iter, val_iter = get_data_iterators(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        num_classes=num_classes,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_iter=train_iter,
        val_iter=val_iter,
        logging_dir_path=args.save_dir,
        num_gpus=num_gpus)
    # NOTE: checkpoint-saving via TrainLogParamSaver is currently disabled.
    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None
    trainer.run()
# Run only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 9,308 | 29.224026 | 115 | py |
imgclsmob | imgclsmob-master/other/chainer_/train_ch_cifar.py | import argparse
import numpy as np
import chainer
from chainer import cuda
from chainer import training
from chainer.training import extensions
from chainer.serializers import save_npz
from common.logger_utils import initialize_logging
from chainer_.utils import prepare_model
from chainer_.cifar1 import add_dataset_parser_arguments
from chainer_.cifar1 import get_data_iterators
def parse_args():
    """
    Parse command line arguments for training on CIFAR with Chainer.

    The --dataset argument is pre-parsed (parse_known_args) so that the
    dataset-specific options can be registered before the full parse.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Chainer/CIFAR)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--dataset',
        type=str,
        default="CIFAR10",
        help='dataset name. options are CIFAR10 and CIFAR100')
    # Peek at --dataset first; its value determines which extra options exist.
    args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, args.dataset)
    # Model / checkpoint options.
    parser.add_argument(
        '--model',
        type=str,
        required=True,
        help='type of model to use. see model_provider for options.')
    parser.add_argument(
        '--use-pretrained',
        action='store_true',
        help='enable using pretrained model from gluon.')
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='resume from previously saved parameters if not None')
    parser.add_argument(
        '--resume-state',
        type=str,
        default='',
        help='resume from previously saved optimizer state if not None')
    # Hardware / data loading options.
    parser.add_argument(
        '--num-gpus',
        type=int,
        default=0,
        help='number of gpus to use.')
    parser.add_argument(
        '-j',
        '--num-data-workers',
        dest='num_workers',
        default=4,
        type=int,
        help='number of preprocessing workers')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=512,
        help='training batch size per device (CPU/GPU).')
    # Training schedule options.
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=120,
        help='number of training epochs.')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=1,
        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument(
        '--attempt',
        type=int,
        default=1,
        help='current number of training')
    # Optimizer / learning-rate schedule options.
    parser.add_argument(
        '--optimizer-name',
        type=str,
        default='nag',
        help='optimizer name')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='learning rate. default is 0.1')
    parser.add_argument(
        '--lr-mode',
        type=str,
        default='cosine',
        help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument(
        '--lr-decay',
        type=float,
        default=0.1,
        help='decay rate of learning rate. default is 0.1')
    parser.add_argument(
        '--lr-decay-period',
        type=int,
        default=0,
        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument(
        '--lr-decay-epoch',
        type=str,
        default='40,60',
        help='epoches at which learning rate decays. default is 40,60.')
    parser.add_argument(
        '--target-lr',
        type=float,
        default=1e-8,
        help='ending learning rate; default is 1e-8')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='momentum value for optimizer; default is 0.9')
    parser.add_argument(
        '--wd',
        type=float,
        default=0.0001,
        help='weight decay rate. default is 0.0001.')
    # Logging / checkpoint options.
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        help='number of batches to wait before logging.')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=4,
        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument(
        '--save-dir',
        type=str,
        default='',
        help='directory of saved models and log-files')
    parser.add_argument(
        '--logging-file-name',
        type=str,
        default='train.log',
        help='filename of training log')
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='Random seed to be fixed')
    parser.add_argument(
        '--log-packages',
        type=str,
        default='mxnet',
        help='list of python packages for logging')
    parser.add_argument(
        '--log-pip-packages',
        type=str,
        default='mxnet-cu92, cupy-cuda100, gluoncv',
        help='list of pip packages for logging')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Resolve the effective random seed.

    A positive seed is returned as-is; a non-positive one is replaced by a
    random integer drawn from [0, 10000).
    """
    if seed > 0:
        return seed
    return np.random.randint(10000)
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    """
    Assemble a Chainer Trainer: optimizer, updater, and standard extensions
    (evaluator, snapshots, reporting, progress bar).

    Parameters:
    ----------
    net : chainer.Link
        Network (with loss/accuracy reporting) to train.
    optimizer_name : str
        Either 'sgd' (MomentumSGD) or 'nag' (NesterovAG).
    lr : float
        Learning rate.
    momentum : float
        Momentum value.
    num_epochs : int
        Number of training epochs (stop trigger).
    train_iter : chainer.iterators iterator
        Training data iterator.
    val_iter : chainer.iterators iterator
        Validation data iterator.
    logging_dir_path : str
        Output directory for trainer artifacts.
    num_gpus : int, default 0
        Number of GPUs; only the first device (id 0) is actually used.

    Returns:
    -------
    chainer.training.Trainer
        Configured trainer.

    Raises:
    ------
    Exception
        If the optimizer name is not supported.
    """
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)
    # Single-device training only: device 0 if any GPU requested, else CPU (-1).
    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)
    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)
    # Validation/snapshot every 100k iterations, console/log report every 1k.
    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'
    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    return trainer
def save_params(file_stem,
                net,
                trainer):
    """
    Write the network weights to <stem>.npz and the trainer state to <stem>.states.
    """
    save_npz(file=file_stem + '.npz', obj=net)
    save_npz(file=file_stem + '.states', obj=trainer)
def main():
    """
    Script entry point: parse arguments, set up logging and the GPU device,
    build the model, data iterators and trainer, then run training.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    num_gpus = args.num_gpus
    if num_gpus > 0:
        # Only the first GPU is used (see prepare_trainer's device selection).
        cuda.get_device(0).use()
    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)
    train_iter, val_iter = get_data_iterators(
        batch_size=batch_size,
        num_workers=args.num_workers)
    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_iter=train_iter,
        val_iter=val_iter,
        logging_dir_path=args.save_dir,
        num_gpus=num_gpus)
    # NOTE: checkpoint-saving via TrainLogParamSaver is currently disabled.
    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None
    trainer.run()
# Run only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 9,190 | 28.744337 | 115 | py |
imgclsmob | imgclsmob-master/other/gluon/seg_utils1.py | """
Segmentation datasets (VOC2012/ADE20K/Cityscapes/COCO) routines.
"""
__all__ = ['add_dataset_parser_arguments', 'batch_fn', 'get_test_data_source', 'get_num_training_samples', 'validate1',
'get_metainfo']
from tqdm import tqdm
from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from gluon.datasets.voc_seg_dataset import VOCSegDataset
from gluon.datasets.ade20k_seg_dataset import ADE20KSegDataset
from gluon.datasets.cityscapes_seg_dataset import CityscapesSegDataset
from gluon.datasets.coco_seg_dataset import CocoSegDataset
# from gluoncv.data.mscoco.segmentation import COCOSegmentation
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Register dataset-related command line arguments on the given parser.

    Parameters:
    ----------
    parser : argparse.ArgumentParser
        Parser to extend.
    dataset_name : str
        One of 'VOC', 'ADE20K', 'Cityscapes', 'COCO'.

    Raises:
    ------
    Exception
        If the dataset name is not recognized.
    """
    # Per-dataset defaults: (data dir, data-dir help text, number of classes).
    dataset_options = {
        "VOC": ('../imgclsmob_data/voc', 'path to directory with Pascal VOC2012 dataset', 21),
        "ADE20K": ('../imgclsmob_data/ade20k', 'path to directory with ADE20K dataset', 150),
        "Cityscapes": ('../imgclsmob_data/cityscapes', 'path to directory with Cityscapes dataset', 19),
        "COCO": ('../imgclsmob_data/coco', 'path to directory with COCO dataset', 21),
    }
    if dataset_name not in dataset_options:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    data_dir_default, data_dir_help, num_classes_default = dataset_options[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=data_dir_default,
        help=data_dir_help)
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes_default,
        help='number of classes')
    # Options shared by all datasets.
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
    parser.add_argument(
        '--image-base-size',
        type=int,
        default=520,
        help='base image size')
    parser.add_argument(
        '--image-crop-size',
        type=int,
        default=480,
        help='crop image size')
def batch_fn(batch, ctx):
    """
    Split a (data, label) batch across the given contexts along the batch axis.
    """
    def spread(tensor):
        return gluon.utils.split_and_load(tensor, ctx_list=ctx, batch_axis=0)
    return spread(batch[0]), spread(batch[1])
def get_num_training_samples(dataset_name):
    """
    Return the hard-coded number of training samples for a dataset.

    Only 'ADE20K' is recognized here, and its count is not specified (None).

    Raises:
    ------
    Exception
        If the dataset name is not recognized.
    """
    if dataset_name != "ADE20K":
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    return None
def get_metainfo(dataset_name):
    """
    Return segmentation metainfo (vague/background index handling) for a dataset.

    Parameters:
    ----------
    dataset_name : str
        One of 'VOC', 'ADE20K', 'Cityscapes', 'COCO'.

    Returns:
    -------
    dict
        Keys: 'vague_idx', 'use_vague', 'background_idx', 'ignore_bg'.

    Raises:
    ------
    Exception
        If the dataset name is not recognized.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset_class = dataset_classes[dataset_name]
    return {
        "vague_idx": dataset_class.vague_idx,
        "use_vague": dataset_class.use_vague,
        "background_idx": dataset_class.background_idx,
        "ignore_bg": dataset_class.ignore_bg}
def get_test_data_source(dataset_name,
                         dataset_dir,
                         batch_size,
                         num_workers):
    """
    Build a test-mode DataLoader for the requested segmentation dataset.

    Images are converted to tensors and normalized with ImageNet statistics.

    Raises:
    ------
    Exception
        If the dataset name is not recognized.
    """
    # ImageNet channel statistics.
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))
    ])
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset = dataset_classes[dataset_name](
        root=dataset_dir,
        mode="test",
        transform=transform_val)
    return gluon.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)
def validate1(accuracy_metric,
              net,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              ctx):
    """
    Run the network over the validation data and accumulate the given metric.

    Returns the metric's (name, value) info from its get() method.
    """
    if data_source_needs_reset:
        val_data.reset()
    accuracy_metric.reset()
    for batch in tqdm(val_data):
        inputs, targets = batch_fn(batch, ctx)
        predictions = [net(x.astype(dtype, copy=False)) for x in inputs]
        accuracy_metric.update(targets, predictions)
    return accuracy_metric.get()
| 5,809 | 31.640449 | 119 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/khpa_utils.py | """
KHPA dataset routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_batch_fn', 'get_train_data_source', 'get_val_data_source', 'validate']
import math
from mxnet import gluon
from gluon.weighted_random_sampler import WeightedRandomSampler
from other.gluon.khpa.khpa_cls_dataset import KHPA
def add_dataset_parser_arguments(parser):
    """
    Register KHPA dataset command line arguments on the given parser.

    Parameters:
    ----------
    parser : argparse.ArgumentParser
        Parser to extend.
    """
    # (flag, add_argument keyword options), registered in display order.
    argument_specs = (
        ('--data-path', dict(
            type=str,
            default='../imgclsmob_data/khpa',
            help='path to KHPA dataset')),
        ('--split-file', dict(
            type=str,
            default='../imgclsmob_data/khpa/split.csv',
            help='path to file with splitting training subset on training and validation ones')),
        ('--gen-split', dict(
            action='store_true',
            help='whether generate split file')),
        ('--num-split-folders', dict(
            type=int,
            default=10,
            help='number of folders for validation subsets')),
        ('--stats-file', dict(
            type=str,
            default='../imgclsmob_data/khpa/stats.json',
            help='path to file with the dataset statistics')),
        ('--gen-stats', dict(
            action='store_true',
            help='whether generate a file with the dataset statistics')),
        ('--input-size', dict(
            type=int,
            default=224,
            help='size of the input for model')),
        ('--resize-inv-factor', dict(
            type=float,
            default=0.875,
            help='inverted ratio for input image crop')),
        ('--num-classes', dict(
            type=int,
            default=56,
            help='number of classes')),
        ('--in-channels', dict(
            type=int,
            default=4,
            help='number of input channels')),
    )
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)
def get_batch_fn():
    """
    Build a function that splits a (data, label) batch across contexts.
    """
    def batch_fn(batch, ctx):
        def spread(tensor):
            return gluon.utils.split_and_load(tensor, ctx_list=ctx, batch_axis=0)
        return spread(batch[0]), spread(batch[1])
    return batch_fn
def get_train_data_loader(data_dir_path,
                          split_file_path,
                          generate_split,
                          num_split_folders,
                          stats_file_path,
                          generate_stats,
                          batch_size,
                          num_workers,
                          model_input_image_size):
    """
    Create the KHPA training DataLoader, sampling with the dataset's
    per-sample weights (presumably to counter class imbalance -- the weights
    come from KHPA.calc_sample_weights).
    """
    dataset = KHPA(
        root=data_dir_path,
        split_file_path=split_file_path,
        generate_split=generate_split,
        num_split_folders=num_split_folders,
        stats_file_path=stats_file_path,
        generate_stats=generate_stats,
        model_input_image_size=model_input_image_size,
        train=True)
    # Weighted sampling replaces plain shuffling (see disabled shuffle below).
    sampler = WeightedRandomSampler(
        length=len(dataset),
        weights=dataset.sample_weights)
    return gluon.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        # shuffle=True,
        sampler=sampler,
        last_batch="discard",
        num_workers=num_workers)
def get_val_data_loader(data_dir_path,
                        split_file_path,
                        generate_split,
                        num_split_folders,
                        stats_file_path,
                        generate_stats,
                        batch_size,
                        num_workers,
                        model_input_image_size,
                        preproc_resize_image_size):
    """
    Create the KHPA validation DataLoader (no shuffling, no weighted sampling).
    """
    return gluon.data.DataLoader(
        dataset=KHPA(
            root=data_dir_path,
            split_file_path=split_file_path,
            generate_split=generate_split,
            num_split_folders=num_split_folders,
            stats_file_path=stats_file_path,
            generate_stats=generate_stats,
            preproc_resize_image_size=preproc_resize_image_size,
            model_input_image_size=model_input_image_size,
            train=False),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)
def get_train_data_source(dataset_args,
                          batch_size,
                          num_workers,
                          input_image_size=(224, 224)):
    """
    Thin wrapper mapping the parsed dataset CLI arguments onto
    get_train_data_loader.
    """
    return get_train_data_loader(
        data_dir_path=dataset_args.data_path,
        split_file_path=dataset_args.split_file,
        generate_split=dataset_args.gen_split,
        num_split_folders=dataset_args.num_split_folders,
        stats_file_path=dataset_args.stats_file,
        generate_stats=dataset_args.gen_stats,
        batch_size=batch_size,
        num_workers=num_workers,
        model_input_image_size=input_image_size)
def get_val_data_source(dataset_args,
                        batch_size,
                        num_workers,
                        input_image_size=(224, 224),
                        resize_inv_factor=0.875):
    """
    Thin wrapper mapping the parsed dataset CLI arguments onto
    get_val_data_loader, computing the pre-crop resize size from
    the crop size and the inverted resize factor.
    """
    assert (resize_inv_factor > 0.0)
    if isinstance(input_image_size, int):
        input_image_size = (input_image_size, input_image_size)
    # Pre-resize target: crop size scaled up by 1/resize_inv_factor, rounded up.
    resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
    return get_val_data_loader(
        data_dir_path=dataset_args.data_path,
        split_file_path=dataset_args.split_file,
        generate_split=dataset_args.gen_split,
        num_split_folders=dataset_args.num_split_folders,
        stats_file_path=dataset_args.stats_file,
        generate_stats=dataset_args.gen_stats,
        batch_size=batch_size,
        num_workers=num_workers,
        model_input_image_size=input_image_size,
        preproc_resize_image_size=resize_value)
def validate(metric_calc,
             net,
             val_data,
             batch_fn,
             data_source_needs_reset,
             dtype,
             ctx):
    """
    Run the network over the validation data, flattening per-class one-hot
    pair outputs before feeding them to the metric, and return the metric
    info from metric_calc.get().
    """
    if data_source_needs_reset:
        val_data.reset()
    metric_calc.reset()
    for batch in val_data:
        data_list, labels_list = batch_fn(batch, ctx)
        # NOTE(review): in mxnet reshape a 0 keeps the input dimension, so the
        # network output is viewed as (batch, num_classes, 2) -- confirm the
        # head really emits 2 logits per class.
        onehot_outputs_list = [net(X.astype(dtype, copy=False)).reshape(0, -1, 2) for X in data_list]
        # Flatten to per-(sample, class) rows for the metric update.
        labels_list_ = [Y.reshape(-1,) for Y in labels_list]
        onehot_outputs_list_ = [Y.reshape(-1, 2) for Y in onehot_outputs_list]
        metric_calc.update(
            src_pts=labels_list_,
            dst_pts=onehot_outputs_list_)
    metric_name_value = metric_calc.get()
    return metric_name_value
| 6,499 | 33.210526 | 118 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/khpa_cls_dataset.py | """
KHPA classification dataset.
"""
import os
import json
import logging
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet.gluon.data import Dataset
from imgaug import augmenters as iaa
from imgaug import parameters as iap
class KHPA(Dataset):
"""
Load the KHPA classification dataset.
Parameters:
----------
root : str, default '~/.mxnet/datasets/imagenet'
Path to the folder stored the dataset.
train : bool, default True
Whether to load the training or validation set.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "khpa"),
split_file_path=os.path.join("~", ".mxnet", "datasets", "khpa", "split.csv"),
generate_split=False,
num_split_folders=10,
working_split_folder_ind1=1,
stats_file_path=os.path.join("~", ".mxnet", "datasets", "khpa", "stats.json"),
generate_stats=False,
num_classes=28,
preproc_resize_image_size=(256, 256),
model_input_image_size=(224, 224),
train=True):
super(KHPA, self).__init__()
self.suffices = ("red", "green", "blue", "yellow")
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
train_file_name = "train.csv"
train_file_path = os.path.join(root_dir_path, train_file_name)
if not os.path.exists(train_file_path):
raise Exception("Train file doesn't exist: {}".format(train_file_path))
images_dir_path = os.path.join(root_dir_path, "train")
if not os.path.exists(images_dir_path):
raise Exception("Train image directory doesn't exist: {}".format(images_dir_path))
train_df = pd.read_csv(
train_file_path,
sep=",",
index_col=False,
dtype={"Id": np.unicode, "Target": np.unicode})
train_file_ids = train_df["Id"].values.astype(np.unicode)
train_file_labels = train_df["Target"].values.astype(np.unicode)
image_count = len(train_file_ids)
if os.path.exists(split_file_path):
if generate_split:
logging.info("Split file already exists: {}".format(split_file_path))
slice_df = pd.read_csv(
split_file_path,
sep=",",
index_col=False,
)
categories = slice_df["Folder{}".format(working_split_folder_ind1)].values.astype(np.uint8)
else:
if not generate_split:
raise Exception("Split file doesn't exist: {}".format(split_file_path))
label_position_lists, label_counts = self.calc_label_position_lists(
train_file_labels=train_file_labels,
num_classes=num_classes)
assert (num_split_folders <= label_counts.min())
unique_label_position_lists, unique_label_counts = self.calc_unique_label_position_lists(
label_position_lists=label_position_lists,
label_counts=label_counts)
assert (image_count == unique_label_counts.sum())
dataset_folder_table = self.create_dataset_folder_table(
num_samples=image_count,
num_folders=num_split_folders,
unique_label_position_lists=unique_label_position_lists)
assert (image_count == dataset_folder_table.sum())
slice_df_dict = {"Id": train_file_ids}
slice_df_dict.update({"Folder{}".format(i + 1): dataset_folder_table[i]
for i in range(num_split_folders)})
slice_df = pd.DataFrame(slice_df_dict)
slice_df.to_csv(
split_file_path,
sep=',',
index=False)
categories = slice_df["Folder{}".format(working_split_folder_ind1)].values.astype(np.uint8)
if os.path.exists(stats_file_path):
if generate_stats:
logging.info("Stats file already exists: {}".format(stats_file_path))
with open(stats_file_path, "r") as f:
stats_dict = json.load(f)
mean_rgby = np.array(stats_dict["mean_rgby"], np.float32)
std_rgby = np.array(stats_dict["std_rgby"], np.float32)
label_counts = np.array(stats_dict["label_counts"], np.int32)
else:
if not generate_split:
raise Exception("Stats file doesn't exist: {}".format(stats_file_path))
label_counts = self.calc_label_counts(train_file_labels, num_classes)
mean_rgby, std_rgby = self.calc_image_widths(train_file_ids, self.suffices, images_dir_path)
stats_dict = {
"mean_rgby": [float(x) for x in mean_rgby],
"std_rgby": [float(x) for x in std_rgby],
"label_counts": [int(x) for x in label_counts],
}
with open(stats_file_path, 'w') as f:
json.dump(stats_dict, f)
self.label_widths = self.calc_label_widths(label_counts, num_classes)
self.mean_rgby = mean_rgby
self.std_rgby = std_rgby
mask = (categories == (0 if train else 1))
self.train_file_ids = train_file_ids[mask]
list_labels = train_file_labels[mask]
self.images_dir_path = images_dir_path
self.num_classes = num_classes
self.train = train
self.onehot_labels = self.calc_onehot_labels(
num_classes=num_classes,
list_labels=list_labels)
if train:
self._transform = KHPATrainTransform(
mean=self.mean_rgby,
std=self.std_rgby,
crop_image_size=model_input_image_size)
self.sample_weights = self.calc_sample_weights(
label_widths=self.label_widths,
list_labels=list_labels)
else:
self._transform = KHPAValTransform(
mean=self.mean_rgby,
std=self.std_rgby,
resize_image_size=preproc_resize_image_size,
crop_image_size=model_input_image_size)
    def __str__(self):
        # Human-readable summary: class name plus number of samples in this split.
        return self.__class__.__name__ + "({})".format(len(self.train_file_ids))
    def __len__(self):
        # Number of samples in the selected (train or validation) split.
        return len(self.train_file_ids)
def __getitem__(self, idx):
image_prefix = self.train_file_ids[idx]
image_prefix_path = os.path.join(self.images_dir_path, image_prefix)
imgs = []
for suffix in self.suffices:
image_file_path = "{}_{}.png".format(image_prefix_path, suffix)
img = mx.image.imread(image_file_path, flag=0)
imgs += [img]
img = mx.nd.concat(*imgs, dim=2)
label = mx.nd.array(self.onehot_labels[idx])
if self._transform is not None:
img, label = self._transform(img, label)
return img, label
@staticmethod
def calc_onehot_labels(num_classes, list_labels):
num_samples = len(list_labels)
onehot_labels = np.zeros((num_samples, num_classes), np.int32)
for i, train_file_label in enumerate(list_labels):
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
onehot_labels[i, label_int] = 1
return onehot_labels
@staticmethod
def calc_sample_weights(label_widths, list_labels):
label_widths1 = label_widths / label_widths.sum()
num_samples = len(list_labels)
sample_weights = np.zeros((num_samples, ), np.float64)
for i, train_file_label in enumerate(list_labels):
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
# sample_weights[i] += label_widths1[label_int]
sample_weights[i] = max(sample_weights[i], label_widths1[label_int])
assert (sample_weights.min() > 0.0)
sample_weights /= sample_weights.sum()
sample_weights = sample_weights.astype(np.float32)
return sample_weights
@staticmethod
def calc_label_position_lists(train_file_labels, num_classes):
label_counts = np.zeros((num_classes, ), np.int32)
label_position_lists = [[] for _ in range(num_classes)]
for sample_ind, train_file_label in enumerate(train_file_labels):
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
assert (0 <= label_int < num_classes)
label_counts[label_int] += 1
label_position_lists[label_int] += [sample_ind]
assert ([len(x) for x in label_position_lists] == list(label_counts))
return label_position_lists, label_counts
@staticmethod
def calc_unique_label_position_lists(label_position_lists, label_counts):
unique_label_position_lists = label_position_lists.copy()
unique_label_counts = label_counts.copy()
order_inds = np.argsort(label_counts)
for i, class_ind_i in enumerate(order_inds):
for sample_ind in unique_label_position_lists[class_ind_i]:
for class_ind_k in order_inds[(i + 1):]:
if sample_ind in unique_label_position_lists[class_ind_k]:
unique_label_position_lists[class_ind_k].remove(sample_ind)
unique_label_counts[class_ind_k] -= 1
assert ([len(x) for x in unique_label_position_lists] == list(unique_label_counts))
return unique_label_position_lists, unique_label_counts
@staticmethod
def create_dataset_folder_table(num_samples, num_folders, unique_label_position_lists):
dataset_folder_table = np.zeros((num_folders, num_samples), np.uint8)
for label_position_list in unique_label_position_lists:
label_positions = np.array(label_position_list)
np.random.shuffle(label_positions)
split_list = np.array_split(label_positions, indices_or_sections=num_folders)
for folder_ind, folder_split_list in enumerate(split_list):
dataset_folder_table[folder_ind, folder_split_list] = 1
return dataset_folder_table
@staticmethod
def calc_label_counts(train_file_labels, num_classes):
label_counts = np.zeros((num_classes, ), np.int32)
for train_file_label in train_file_labels:
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
assert (0 <= label_int < num_classes)
label_counts[label_int] += 1
return label_counts
@staticmethod
def calc_label_widths(label_counts, num_classes):
total_label_count = label_counts.sum()
label_widths = (1.0 / label_counts) / num_classes * total_label_count
return label_widths
    @staticmethod
    def calc_image_widths(train_file_ids, suffices, images_dir_path):
        """Estimate the per-channel mean and (sample) standard deviation over
        the whole training set, one channel (file suffix) at a time.

        NOTE(review): despite the name, this computes intensity statistics,
        not image widths. It loads every image of one channel into memory at
        once — presumably acceptable for this dataset's size; confirm.
        """
        logging.info("Calculating image widths...")
        mean_rgby = np.zeros((len(suffices),), np.float32)
        std_rgby = np.zeros((len(suffices),), np.float32)
        for i, suffix in enumerate(suffices):
            logging.info("Processing suffix: {}".format(suffix))
            imgs = []
            for image_prefix in train_file_ids:
                image_prefix_path = os.path.join(images_dir_path, image_prefix)
                image_file_path = "{}_{}.png".format(image_prefix_path, suffix)
                # flag=0 -> load as single-channel (grayscale) image.
                img = mx.image.imread(image_file_path, flag=0).asnumpy()
                imgs += [img]
            imgs = np.concatenate(tuple(imgs), axis=2).flatten()
            mean_rgby[i] = imgs.mean()
            # In-place float math over the flattened pixels to limit peak memory.
            imgs = imgs.astype(np.float32, copy=False)
            imgs -= mean_rgby[i]
            imgs **= 2
            # Bessel's correction: unbiased (sample) standard deviation.
            std = np.sqrt(imgs.mean() * len(imgs) / (len(imgs) - 1))
            std_rgby[i] = std
            logging.info("i={}, mean={}, std={}".format(i, mean_rgby[i], std_rgby[i]))
        return mean_rgby, std_rgby
class KHPATrainTransform(object):
    """
    Training-time transform for 4-channel KHPA images: a heavy imgaug
    augmentation pipeline (geometry, color, noise, blur), per-channel
    normalization, random-size crop and HWC -> CHW transposition.

    Parameters:
    ----------
    mean : tuple of 4 float, default (0.0, 0.0, 0.0, 0.0)
        Per-channel mean for normalization.
    std : tuple of 4 float, default (1.0, 1.0, 1.0, 1.0)
        Per-channel std for normalization.
    crop_image_size : int or tuple of 2 int, default (224, 224)
        Output (random crop) size.
    """
    def __init__(self,
                 mean=(0.0, 0.0, 0.0, 0.0),
                 std=(1.0, 1.0, 1.0, 1.0),
                 crop_image_size=(224, 224)):
        if isinstance(crop_image_size, int):
            crop_image_size = (crop_image_size, crop_image_size)
        self._mean = mean
        self._std = std
        self.crop_image_size = crop_image_size
        # Augmentation pipeline. The four top-level stages (geometry, color,
        # noise, blur) run in random order inside "MainProcess".
        self.seq = iaa.Sequential(
            children=[
                iaa.Sequential(
                    children=[
                        iaa.Fliplr(
                            p=0.5,
                            name="Fliplr"),
                        iaa.Flipud(
                            p=0.5,
                            name="Flipud"),
                        iaa.Sequential(
                            children=[
                                iaa.Affine(
                                    scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
                                    translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                                    rotate=(-45, 45),
                                    shear=(-16, 16),
                                    order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
                                    mode="reflect",
                                    name="Affine"),
                                iaa.Sometimes(
                                    p=0.01,
                                    then_list=iaa.PiecewiseAffine(
                                        scale=(0.0, 0.01),
                                        nb_rows=(4, 20),
                                        nb_cols=(4, 20),
                                        order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
                                        mode="reflect",
                                        name="PiecewiseAffine"))],
                            random_order=True,
                            name="GeomTransform"),
                        iaa.Sequential(
                            children=[
                                iaa.Sometimes(
                                    p=0.75,
                                    then_list=iaa.Add(
                                        value=(-10, 10),
                                        per_channel=0.5,
                                        name="Brightness")),
                                iaa.Sometimes(
                                    p=0.05,
                                    then_list=iaa.Emboss(
                                        alpha=(0.0, 0.5),
                                        strength=(0.5, 1.2),
                                        name="Emboss")),
                                iaa.Sometimes(
                                    p=0.1,
                                    then_list=iaa.Sharpen(
                                        alpha=(0.0, 0.5),
                                        lightness=(0.5, 1.2),
                                        name="Sharpen")),
                                iaa.Sometimes(
                                    p=0.25,
                                    then_list=iaa.ContrastNormalization(
                                        alpha=(0.5, 1.5),
                                        per_channel=0.5,
                                        name="ContrastNormalization"))
                            ],
                            random_order=True,
                            name="ColorTransform"),
                        iaa.Sequential(
                            children=[
                                iaa.Sometimes(
                                    p=0.5,
                                    then_list=iaa.AdditiveGaussianNoise(
                                        loc=0,
                                        scale=(0.0, 10.0),
                                        per_channel=0.5,
                                        name="AdditiveGaussianNoise")),
                                iaa.Sometimes(
                                    p=0.1,
                                    then_list=iaa.SaltAndPepper(
                                        p=(0, 0.001),
                                        per_channel=0.5,
                                        name="SaltAndPepper"))],
                            random_order=True,
                            name="Noise"),
                        iaa.OneOf(
                            children=[
                                iaa.Sometimes(
                                    p=0.05,
                                    then_list=iaa.MedianBlur(
                                        k=3,
                                        name="MedianBlur")),
                                iaa.Sometimes(
                                    p=0.05,
                                    then_list=iaa.AverageBlur(
                                        k=(2, 4),
                                        name="AverageBlur")),
                                iaa.Sometimes(
                                    p=0.5,
                                    then_list=iaa.GaussianBlur(
                                        sigma=(0.0, 2.0),
                                        name="GaussianBlur"))],
                            name="Blur"),
                    ],
                    random_order=True,
                    name="MainProcess")])

    def __call__(self, img, label):
        """Augment, normalize, random-size crop and transpose `img` (HWC,
        4-channel mx.nd.NDArray); `label` is passed through unchanged."""
        # import cv2
        # cv2.imshow(winname="src_img1", mat=img.asnumpy()[:, :, :3])
        # cv2.imshow(winname="src_img2", mat=img.asnumpy()[:, :, 1:])
        seq_det = self.seq.to_deterministic()
        imgs_aug = img.asnumpy().copy()
        # imgs_aug = seq_det.augment_images(img.asnumpy().transpose((2, 0, 1)))
        # Apply the SAME (deterministic) augmentation to the first three
        # channels and the remaining channel(s) so all channels stay aligned —
        # presumably because the augmenter expects <=3-channel images; confirm.
        imgs_aug[:, :, :3] = seq_det.augment_image(imgs_aug[:, :, :3])
        imgs_aug[:, :, 3:] = seq_det.augment_image(imgs_aug[:, :, 3:])
        # img_np = imgs_aug.transpose((1, 2, 0))
        img_np = imgs_aug
        # cv2.imshow(winname="dst_img1", mat=img_np[:, :, :3])
        # cv2.imshow(winname="dst_img2", mat=img_np[:, :, 1:])
        # cv2.waitKey(0)
        img_np = img_np.astype(np.float32)
        img_np = (img_np - self._mean) / self._std
        img = mx.nd.array(img_np, ctx=img.context)
        img = mx.image.random_size_crop(
            src=img,
            size=self.crop_image_size,
            area=(0.08, 1.0),
            ratio=(3.0 / 4.0, 4.0 / 3.0),
            interp=1)[0]
        # HWC -> CHW for the network input.
        img = img.transpose((2, 0, 1))
        return img, label
class KHPAValTransform(object):
    """
    Validation-time preprocessing: aspect-preserving resize of the shorter
    side, center crop, per-channel normalization and HWC -> CHW transposition.

    Parameters:
    ----------
    mean : tuple of 4 float, default (0.0, 0.0, 0.0, 0.0)
        Per-channel mean for normalization.
    std : tuple of 4 float, default (1.0, 1.0, 1.0, 1.0)
        Per-channel std for normalization.
    resize_image_size : int or tuple of 2 int, default (256, 256)
        Target size of the shorter image side.
    crop_image_size : int or tuple of 2 int, default (224, 224)
        Output (center crop) size.
    """
    def __init__(self,
                 mean=(0.0, 0.0, 0.0, 0.0),
                 std=(1.0, 1.0, 1.0, 1.0),
                 resize_image_size=(256, 256),
                 crop_image_size=(224, 224)):
        if isinstance(crop_image_size, int):
            crop_image_size = (crop_image_size, crop_image_size)
        # The resize size is used as a scalar (length of the shorter side) in
        # __call__, but the default (and some callers) supply a tuple, which
        # previously crashed at call time -- normalize it to an int here.
        if not isinstance(resize_image_size, int):
            resize_image_size = resize_image_size[0]
        self._mean = mean
        self._std = std
        self.resize_image_size = resize_image_size
        self.crop_image_size = crop_image_size

    def __call__(self, img, label):
        """Resize/crop/normalize `img` (HWC mx.nd.NDArray); `label` passes
        through unchanged."""
        h, w, _ = img.shape
        # Scale the shorter side to `resize_image_size`, keeping aspect ratio.
        if h > w:
            wsize = self.resize_image_size
            hsize = int(h * wsize / w)
        else:
            hsize = self.resize_image_size
            wsize = int(w * hsize / h)
        img = mx.image.imresize(
            src=img,
            w=wsize,
            h=hsize,
            interp=1)
        img = mx.image.center_crop(
            src=img,
            size=self.crop_image_size,
            interp=1)[0]
        img = img.astype(np.float32)
        img = (img - mx.nd.array(self._mean, ctx=img.context)) / mx.nd.array(self._std, ctx=img.context)
        # HWC -> CHW for the network input.
        img = img.transpose((2, 0, 1))
        return img, label
class KHPAMetaInfo(object):
    """Static metadata describing the KHPA dataset for the training scripts."""
    # Human-readable dataset identifier.
    label = "KHPA"
    # Default dataset subdirectory name.
    root_dir_name = "khpa"
    # Dataset class used to instantiate the data source.
    dataset_class = KHPA
    # Unknown up front; the split is generated at runtime.
    num_training_samples = None
    # Four grayscale channel images per sample.
    in_channels = 4
    num_classes = 56
    # Default (H, W) model input size.
    input_image_size = (224, 224)
| 20,192 | 41.511579 | 104 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/eval_gl_khpa.py | import argparse
import time
import logging
import mxnet as mx
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model, calc_net_weight_count
from other.gluon.khpa.khpa_utils import add_dataset_parser_arguments
from other.gluon.khpa.khpa_utils import get_batch_fn
from other.gluon.khpa.khpa_utils import get_val_data_source
from other.gluon.khpa.khpa_utils import validate
def parse_args():
    """Build and parse the command-line arguments for KHPA model evaluation.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Evaluate a model for image classification (Gluon/KHPA)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_dataset_parser_arguments(parser)
    # (flags, keyword arguments) for every script-level option.
    arg_specs = [
        (('--model',), dict(
            type=str,
            required=True,
            help='type of model to use. see model_provider for options.')),
        (('--use-pretrained',), dict(
            action='store_true',
            help='enable using pretrained model from gluon.')),
        (('--dtype',), dict(
            type=str,
            default='float32',
            help='data type for training. default is float32')),
        (('--resume',), dict(
            type=str,
            default='',
            help='resume from previously saved parameters if not None')),
        (('--num-gpus',), dict(
            type=int,
            default=0,
            help='number of gpus to use.')),
        (('-j', '--num-data-workers'), dict(
            dest='num_workers',
            default=4,
            type=int,
            help='number of preprocessing workers')),
        (('--batch-size',), dict(
            type=int,
            default=512,
            help='training batch size per device (CPU/GPU).')),
        (('--save-dir',), dict(
            type=str,
            default='',
            help='directory of saved models and log-files')),
        (('--logging-file-name',), dict(
            type=str,
            default='train.log',
            help='filename of training log')),
        (('--log-packages',), dict(
            type=str,
            default='mxnet',
            help='list of python packages for logging')),
        (('--log-pip-packages',), dict(
            type=str,
            default='mxnet-cu92',
            help='list of pip packages for logging')),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def test(net,
         val_data,
         batch_fn,
         data_source_needs_reset,
         dtype,
         ctx,
         calc_weight_count=False,
         extended_log=False):
    """Run one validation pass and log the RMSE, optionally the trainable
    parameter count and the elapsed time."""
    metric = mx.metric.RMSE()
    start_time = time.time()
    rmse_value = validate(
        metric_calc=metric,
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=data_source_needs_reset,
        dtype=dtype,
        ctx=ctx)
    if calc_weight_count:
        logging.info('Model: {} trainable parameters'.format(calc_net_weight_count(net)))
    # The extended form repeats the raw (unrounded) metric value.
    template = 'Test: rmse={rmse:.4f} ({rmse})' if extended_log else 'Test: rmse={rmse:.4f}'
    logging.info(template.format(rmse=rmse_value))
    logging.info('Time cost: {:.4f} sec'.format(time.time() - start_time))
def main():
    """Script entry point: parse arguments, set up logging, device context,
    model and validation data, then run a single evaluation pass."""
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        tune_layers="",
        classes=args.num_classes,
        in_channels=args.in_channels,
        ctx=ctx)
    # Prefer the model's own preferred input size when it declares one.
    input_image_size = net.in_size if hasattr(net, 'in_size') else (args.input_size, args.input_size)
    val_data = get_val_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    batch_fn = get_batch_fn()
    # Evaluation needs weights: either pretrained or an explicit checkpoint.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=args.use_rec,  # NOTE(review): `use_rec` must come from add_dataset_parser_arguments — confirm
        dtype=args.dtype,
        ctx=ctx,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        extended_log=True)
# Script entry point.
if __name__ == '__main__':
    main()
| 4,686 | 27.23494 | 101 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/train_gl_khpa.py | import argparse
import time
import logging
import os
import numpy as np
import random
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model
from other.gluon.khpa.khpa_utils import add_dataset_parser_arguments
from other.gluon.khpa.khpa_utils import get_batch_fn
from other.gluon.khpa.khpa_utils import get_train_data_source
from other.gluon.khpa.khpa_utils import get_val_data_source
from other.gluon.khpa.khpa_utils import validate
def parse_args():
    """Build and parse the command-line arguments for KHPA model training
    (Gluon): model selection, device/worker counts, optimizer and LR-schedule
    hyper-parameters, checkpointing and logging options.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Gluon/KHPA)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_dataset_parser_arguments(parser)
    parser.add_argument(
        '--model',
        type=str,
        required=True,
        help='type of model to use. see model_provider for options.')
    parser.add_argument(
        '--use-pretrained',
        action='store_true',
        help='enable using pretrained model from gluon.')
    parser.add_argument(
        '--dtype',
        type=str,
        default='float32',
        help='data type for training')
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='resume from previously saved parameters if not None')
    parser.add_argument(
        '--resume-state',
        type=str,
        default='',
        help='resume from previously saved optimizer state if not None')
    parser.add_argument(
        '--num-gpus',
        type=int,
        default=0,
        help='number of gpus to use.')
    parser.add_argument(
        '-j',
        '--num-data-workers',
        dest='num_workers',
        default=4,
        type=int,
        help='number of preprocessing workers')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=512,
        help='training batch size per device (CPU/GPU).')
    parser.add_argument(
        '--batch-size-scale',
        type=int,
        default=1,
        help='manual batch-size increasing factor.')
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=120,
        help='number of training epochs.')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=1,
        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument(
        '--attempt',
        type=int,
        default=1,
        help='current number of training')
    parser.add_argument(
        '--optimizer-name',
        type=str,
        default='nag',
        help='optimizer name')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='learning rate')
    parser.add_argument(
        '--lr-mode',
        type=str,
        default='cosine',
        help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument(
        '--lr-decay',
        type=float,
        default=0.1,
        help='decay rate of learning rate')
    parser.add_argument(
        '--lr-decay-period',
        type=int,
        default=0,
        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument(
        '--lr-decay-epoch',
        type=str,
        default='40,60',
        help='epoches at which learning rate decays')
    parser.add_argument(
        '--target-lr',
        type=float,
        default=1e-8,
        help='ending learning rate')
    parser.add_argument(
        '--poly-power',
        type=float,
        default=2,
        help='power value for poly LR scheduler')
    parser.add_argument(
        '--warmup-epochs',
        type=int,
        default=0,
        help='number of warmup epochs.')
    parser.add_argument(
        '--warmup-lr',
        type=float,
        default=1e-8,
        help='starting warmup learning rate')
    parser.add_argument(
        '--warmup-mode',
        type=str,
        default='linear',
        help='learning rate scheduler warmup mode. options are linear, poly and constant')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='momentum value for optimizer')
    parser.add_argument(
        '--wd',
        type=float,
        default=0.0001,
        help='weight decay rate')
    parser.add_argument(
        '--gamma-wd-mult',
        type=float,
        default=1.0,
        help='weight decay multiplier for batchnorm gamma')
    parser.add_argument(
        '--beta-wd-mult',
        type=float,
        default=1.0,
        help='weight decay multiplier for batchnorm beta')
    parser.add_argument(
        '--bias-wd-mult',
        type=float,
        default=1.0,
        help='weight decay multiplier for bias')
    parser.add_argument(
        '--grad-clip',
        type=float,
        default=None,
        help='max_norm for gradient clipping')
    parser.add_argument(
        '--label-smoothing',
        action='store_true',
        help='use label smoothing')
    parser.add_argument(
        '--mixup',
        action='store_true',
        help='use mixup strategy')
    parser.add_argument(
        '--mixup-epoch-tail',
        type=int,
        default=20,
        help='number of epochs without mixup at the end of training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        help='number of batches to wait before logging.')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=4,
        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument(
        '--save-dir',
        type=str,
        default='',
        help='directory of saved models and log-files')
    parser.add_argument(
        '--logging-file-name',
        type=str,
        default='train.log',
        help='filename of training log')
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='Random seed to be fixed')
    parser.add_argument(
        '--log-packages',
        type=str,
        default='mxnet',
        help='list of python packages for logging')
    parser.add_argument(
        '--log-pip-packages',
        type=str,
        default='mxnet-cu92',
        help='list of pip packages for logging')
    parser.add_argument(
        '--tune-layers',
        type=str,
        default='',
        help='Regexp for selecting layers for fine tuning')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """Seed the Python, NumPy and MXNet RNGs; a non-positive `seed` is
    replaced by a randomly drawn one.

    Returns the seed actually used.
    """
    actual_seed = seed if seed > 0 else np.random.randint(10000)
    random.seed(actual_seed)
    np.random.seed(actual_seed)
    mx.random.seed(actual_seed)
    return actual_seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """Create a gluon Trainer together with its LR scheduler, applying
    per-parameter weight-decay multipliers and optionally restoring a saved
    optimizer state.

    Returns the (trainer, lr_scheduler) pair.
    """
    # Per-parameter-group weight-decay multipliers (batchnorm gamma/beta, bias).
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params('.*gamma').items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params('.*beta').items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params('.*bias').items():
            v.wd_mult = bias_wd_mult
    # A positive decay period overrides the explicit epoch list.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {'learning_rate': lr,
                        'wd': wd,
                        'momentum': momentum,
                        'lr_scheduler': lr_scheduler}
    if dtype != 'float32':
        # Keep float32 master weights when training in reduced precision.
        optimizer_params['multi_precision'] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info('Loading trainer states: {}'.format(state_file_path))
        trainer.load_states(state_file_path)
        # The restored state may carry stale hyper-parameters: re-apply the
        # requested weight decay and replace the scheduler with a fresh one.
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info('Reset the weight decay: {}'.format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem,
                net,
                trainer):
    """Persist model weights (<stem>.params) and optimizer state
    (<stem>.states) next to each other."""
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                metric_calc,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                grad_clip_value,
                batch_size_scale):
    """Run one training epoch with optional gradient accumulation
    (`batch_size_scale` > 1) and gradient clipping.

    Returns the (metric name, metric value, mean training loss) triple.
    """
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    metric_calc.reset()
    train_loss = 0.0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        # Per-class binary labels: each class becomes a 2-way one-hot.
        onehot_labels_list = [Y.one_hot(depth=2) for Y in labels_list]
        with ag.record():
            onehot_outputs_list = [net(X.astype(dtype, copy=False)).reshape(0, -1, 2) for X in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in
                         zip(onehot_outputs_list, onehot_labels_list)]
        for loss in loss_list:
            loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # Gradient accumulation: only step (and clear grads) every
            # `batch_size_scale` batches; grad_req is set to 'add' by the caller.
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list)
        labels_list_ = [Y.reshape(-1,) for Y in labels_list]
        onehot_outputs_list_ = [Y.reshape(-1, 2) for Y in onehot_outputs_list]
        # NOTE(review): `src_pts`/`dst_pts` are unusual kwargs for an mx metric
        # update — presumably a custom metric wrapper; confirm.
        metric_calc.update(
            src_pts=labels_list_,
            dst_pts=onehot_outputs_list_)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            metric_name, metric_value = metric_calc.get()
            logging.info('Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}={:.4f}\tlr={:.5f}'.format(
                epoch + 1, i, speed, metric_name, metric_value, trainer.learning_rate))
    # Flush any leftover accumulated gradients at the end of the epoch.
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info('[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec'.format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    metric_name, metric_value = metric_calc.get()
    logging.info('[Epoch {}] training: {}={:.4f}\tloss={:.4f}'.format(
        epoch + 1, metric_name, metric_value, train_loss))
    return metric_name, metric_value, train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              grad_clip_value,
              batch_size_scale,
              ctx):
    """Full training loop: per-epoch training + validation with F1 metrics,
    sigmoid BCE loss, and optional checkpointing via `lp_saver`.

    `start_epoch1` is 1-based; when resuming (> 1) an initial validation pass
    is run before training continues.
    """
    if batch_size_scale != 1:
        # Gradient accumulation requires gradients to sum across batches.
        for p in net.collect_params().values():
            p.grad_req = 'add'
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    val_metric_calc = mx.metric.F1()
    train_metric_calc = mx.metric.F1()
    loss_func = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        logging.info('Start training from [Epoch {}]'.format(start_epoch1))
        val_metric_name_value = validate(
            metric_calc=val_metric_calc,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        logging.info('[Epoch {}] validation: {}={:.4f}'.format(
            start_epoch1 - 1, val_metric_name_value[0], val_metric_name_value[1]))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_metric_name, train_metric_value, train_loss = train_epoch(
            epoch=epoch,
            net=net,
            metric_calc=train_metric_calc,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        val_metric_name, val_metric_value = validate(
            metric_calc=val_metric_calc,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        logging.info('[Epoch {}] validation: {}={:.4f}'.format(
            epoch + 1, val_metric_name, val_metric_value))
        if lp_saver is not None:
            lp_saver_kwargs = {'net': net, 'trainer': trainer}
            # The saver minimizes its tracked value, so negate F1 scores.
            val_metric_value_dec = -val_metric_value
            train_metric_value_dec = -train_metric_value
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=[val_metric_value_dec, train_metric_value_dec, train_loss, trainer.learning_rate],
                **lp_saver_kwargs)
    logging.info('Total time cost: {:.2f} sec'.format(time.time() - gtic))
    if lp_saver is not None:
        # NOTE(review): the 'err-top5' label is misleading here — the tracked
        # value is the negated validation F1, not a top-5 error.
        logging.info('Best err-top5: {:.4f} at {} epoch'.format(
            lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """Script entry point: parse arguments, seed RNGs, set up logging, device
    context, model, data sources, trainer and checkpoint saver, then run the
    training loop."""
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        ctx=ctx)
    assert (hasattr(net, 'classes'))
    assert (hasattr(net, 'in_size'))
    # num_classes = net.classes if hasattr(net, 'classes') else 1000
    # Prefer the model's own preferred input size when it declares one.
    input_image_size = net.in_size if hasattr(net, 'in_size') else (args.input_size, args.input_size)
    train_data = get_train_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size)
    val_data = get_val_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    batch_fn = get_batch_fn()
    num_training_samples = len(train_data._dataset)
    data_source_needs_reset = False
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    # Checkpointing is enabled only when both a directory and interval are set.
    if args.save_dir and args.save_interval:
        metric_type = "F1"
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=('.params', '.states'),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=['Val.' + metric_type, 'Train.' + metric_type, 'Train.Loss', 'LR'],
            acc_ind=0,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, 'score.log'),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    else:
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=data_source_needs_reset,
        dtype=args.dtype,
        net=net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        ctx=ctx)
# Script entry point.
if __name__ == '__main__':
    main()
| 19,879 | 31.012882 | 105 | py |
imgclsmob | imgclsmob-master/other/pytorch/imagenet1k1.py | import math
import os
import cv2
import numpy as np
from PIL import Image
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
def add_dataset_parser_arguments(parser):
    """Register the ImageNet-1K dataset command-line options on `parser`."""
    # (flag, keyword arguments) for every dataset-level option.
    specs = [
        ('--data-dir', dict(
            type=str,
            default='../imgclsmob_data/imagenet',
            help='path to directory with ImageNet-1K dataset')),
        ('--input-size', dict(
            type=int,
            default=224,
            help='size of the input for model')),
        ('--resize-inv-factor', dict(
            type=float,
            default=0.875,
            help='inverted ratio for input image crop')),
        ('--num-classes', dict(
            type=int,
            default=1000,
            help='number of classes')),
        ('--in-channels', dict(
            type=int,
            default=3,
            help='number of input channels')),
        ('--use-cv-resize', dict(
            action='store_true',
            help='use OpenCV resize preprocessing')),
    ]
    for flag, options in specs:
        parser.add_argument(flag, **options)
def cv_loader(path):
    """Read an image file with OpenCV and return it as an RGB numpy array."""
    bgr_img = cv2.imread(path, flags=1)
    return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
class CvResize(object):
    """
    Resize a PIL image using OpenCV, mimicking torchvision's Resize semantics.

    Parameters:
    ----------
    size : int or tuple of (W, H)
        Size of output image. An int resizes the shorter side to that value
        while preserving aspect ratio; a tuple resizes to exactly (W, H).
    interpolation : int, default PIL.Image.BILINEAR
        PIL interpolation flag, mapped to the matching OpenCV flag.
    """
    def __init__(self,
                 size,
                 interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """
        Resize image.

        Parameters:
        ----------
        img : PIL.Image
            input image.

        Returns:
        -------
        PIL.Image
            Resulted image.
        """
        pil_to_cv = {
            Image.NEAREST: cv2.INTER_NEAREST,
            Image.BILINEAR: cv2.INTER_LINEAR,
            Image.BICUBIC: cv2.INTER_CUBIC,
            Image.LANCZOS: cv2.INTER_LANCZOS4,
        }
        if self.interpolation not in pil_to_cv:
            raise ValueError()
        cv_interpolation = pil_to_cv[self.interpolation]
        if not isinstance(self.size, int):
            # Exact (W, H) target size.
            resized = cv2.resize(np.array(img), dsize=self.size, interpolation=cv_interpolation)
            return Image.fromarray(resized)
        w, h = img.size
        if min(w, h) == self.size:
            # Shorter side already matches; nothing to do.
            return img
        if w < h:
            out_size = (self.size, int(self.size * h / w))
        else:
            out_size = (int(self.size * w / h), self.size)
        resized = cv2.resize(np.array(img), dsize=out_size, interpolation=cv_interpolation)
        return Image.fromarray(resized)
def get_train_data_loader(data_dir,
                          batch_size,
                          num_workers,
                          input_image_size=224):
    """Create the ImageNet-1K training DataLoader: random resized crop,
    horizontal flip, color jitter, tensor conversion and normalization."""
    jitter = 0.4
    augmentation = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter,
            contrast=jitter,
            saturation=jitter),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    train_dataset = datasets.ImageFolder(
        root=os.path.join(data_dir, 'train'),
        transform=augmentation)
    return torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
def get_val_data_loader(data_dir,
                        batch_size,
                        num_workers,
                        input_image_size=224,
                        resize_inv_factor=0.875,
                        use_cv_resize=False):
    """Create the ImageNet-1K validation DataLoader: resize (PIL or OpenCV),
    center crop, tensor conversion and normalization."""
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    resize_op = CvResize(resize_value) if use_cv_resize else transforms.Resize(resize_value)
    preprocessing = transforms.Compose([
        resize_op,
        transforms.CenterCrop(input_image_size),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    val_dataset = datasets.ImageFolder(
        root=os.path.join(data_dir, 'val'),
        transform=preprocessing)
    return torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
| 5,152 | 27.469613 | 90 | py |
imgclsmob | imgclsmob-master/other/pytorch/cub200_2011_utils1.py | """
CUB-200-2011 fine-grained classification dataset routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
import math
import torch.utils.data
import torchvision.transforms as transforms
from pytorch.datasets.cub200_2011_cls_dataset import CUB200_2011
def add_dataset_parser_arguments(parser):
    """
    Register CUB-200-2011 dataset command line options on the given
    argparse parser.
    """
    # (flag, type, default, help) for every option this dataset needs.
    option_specs = [
        ('--data-dir', str, '../imgclsmob_data/CUB_200_2011',
         'path to directory with CUB-200-2011 dataset'),
        ('--input-size', int, 448,
         'size of the input for model'),
        ('--resize-inv-factor', float, 0.74667,
         'inverted ratio for input image crop'),
        ('--num-classes', int, 200,
         'number of classes'),
        ('--in-channels', int, 3,
         'number of input channels'),
    ]
    for flag, option_type, default_value, help_text in option_specs:
        parser.add_argument(
            flag,
            type=option_type,
            default=default_value,
            help=help_text)
def get_train_data_loader(dataset_dir,
                          batch_size,
                          num_workers,
                          input_image_size=448):
    """
    Create a shuffled DataLoader over the CUB-200-2011 training split.

    Parameters:
    ----------
    dataset_dir : str
        Path to the root directory of the CUB-200-2011 dataset.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.
    input_image_size : int, default 448
        Spatial size of the model input.

    Returns:
    -------
    torch.utils.data.DataLoader
        Training data loader with augmentation.
    """
    # ImageNet RGB statistics reused for normalization.
    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)
    jitter_param = 0.4
    # Standard training augmentation: random resized crop, flip, color jitter.
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)])
    dataset = CUB200_2011(
        root=dataset_dir,
        train=True,
        transform=transform_train)
    train_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
    return train_loader
def get_val_data_loader(dataset_dir,
                        batch_size,
                        num_workers,
                        input_image_size=448,
                        resize_inv_factor=0.74667):
    """
    Create a DataLoader over the CUB-200-2011 validation split.

    Parameters:
    ----------
    dataset_dir : str
        Path to the root directory of the CUB-200-2011 dataset.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.
    input_image_size : int, default 448
        Spatial size of the model input (center crop size).
    resize_inv_factor : float, default 0.74667
        Inverted ratio of the crop relative to the resized image.

    Returns:
    -------
    torch.utils.data.DataLoader
        Validation data loader (no shuffling).
    """
    assert (resize_inv_factor > 0.0)
    # Resize the shorter side so the center crop keeps `resize_inv_factor`
    # of the resized image.
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)
    transform_val = transforms.Compose([
        transforms.Resize(resize_value),
        transforms.CenterCrop(input_image_size),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])
    dataset = CUB200_2011(
        root=dataset_dir,
        train=False,
        transform=transform_val)
    val_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
    return val_loader
| 3,014 | 26.409091 | 90 | py |
imgclsmob | imgclsmob-master/other/pytorch/cifar1.py | """
CIFAR/SVHN dataset routines.
"""
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Register CIFAR-10/CIFAR-100/SVHN command line options on the given
    argparse parser.

    Raises Exception for an unrecognized `dataset_name`.
    """
    # dataset name -> (default data dir, human-readable title, class count)
    dataset_specs = {
        "CIFAR10": ('../imgclsmob_data/cifar10', 'CIFAR-10', 10),
        "CIFAR100": ('../imgclsmob_data/cifar100', 'CIFAR-100', 100),
        "SVHN": ('../imgclsmob_data/svhn', 'SVHN', 10),
    }
    if dataset_name not in dataset_specs:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    default_dir, title, class_count = dataset_specs[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=default_dir,
        help='path to directory with {} dataset'.format(title))
    parser.add_argument(
        '--num-classes',
        type=int,
        default=class_count,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
def get_train_data_loader(dataset_name,
                          dataset_dir,
                          batch_size,
                          num_workers):
    """
    Create a shuffled training DataLoader for CIFAR-10/CIFAR-100/SVHN.

    Parameters:
    ----------
    dataset_name : str
        One of 'CIFAR10', 'CIFAR100' or 'SVHN'.
    dataset_dir : str
        Path to the dataset root (downloaded there if absent).
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    torch.utils.data.DataLoader
        Training data loader with augmentation.

    Raises Exception for an unrecognized `dataset_name`.
    """
    # CIFAR RGB statistics (also applied to SVHN here).
    mean_rgb = (0.4914, 0.4822, 0.4465)
    std_rgb = (0.2023, 0.1994, 0.2010)
    jitter_param = 0.4
    transform_train = transforms.Compose([
        transforms.RandomCrop(size=32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb),
    ])
    if dataset_name == "CIFAR10":
        dataset = datasets.CIFAR10(
            root=dataset_dir,
            train=True,
            transform=transform_train,
            download=True)
    elif dataset_name == "CIFAR100":
        dataset = datasets.CIFAR100(
            root=dataset_dir,
            train=True,
            transform=transform_train,
            download=True)
    elif dataset_name == "SVHN":
        dataset = datasets.SVHN(
            root=dataset_dir,
            split="train",
            transform=transform_train,
            download=True)
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    train_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
    return train_loader
def get_val_data_loader(dataset_name,
                        dataset_dir,
                        batch_size,
                        num_workers):
    """
    Create a validation/test DataLoader for CIFAR-10/CIFAR-100/SVHN.

    Parameters:
    ----------
    dataset_name : str
        One of 'CIFAR10', 'CIFAR100' or 'SVHN'.
    dataset_dir : str
        Path to the dataset root (downloaded there if absent).
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    torch.utils.data.DataLoader
        Validation data loader (no shuffling, no augmentation).

    Raises Exception for an unrecognized `dataset_name`.
    """
    mean_rgb = (0.4914, 0.4822, 0.4465)
    std_rgb = (0.2023, 0.1994, 0.2010)
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb),
    ])
    if dataset_name == "CIFAR10":
        dataset = datasets.CIFAR10(
            root=dataset_dir,
            train=False,
            transform=transform_val,
            download=True)
    elif dataset_name == "CIFAR100":
        dataset = datasets.CIFAR100(
            root=dataset_dir,
            train=False,
            transform=transform_val,
            download=True)
    elif dataset_name == "SVHN":
        # SVHN uses named splits instead of a train flag.
        dataset = datasets.SVHN(
            root=dataset_dir,
            split="test",
            transform=transform_val,
            download=True)
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    val_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
    return val_loader
| 4,409 | 28.205298 | 90 | py |
imgclsmob | imgclsmob-master/other/pytorch/seg_utils.py | """
Segmentation datasets (VOC2012/ADE20K/Cityscapes/COCO) routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_test_data_loader', 'validate1', 'get_metainfo']
from tqdm import tqdm
import torch.utils.data
import torchvision.transforms as transforms
from pytorch.datasets.voc_seg_dataset import VOCSegDataset
from pytorch.datasets.ade20k_seg_dataset import ADE20KSegDataset
from pytorch.datasets.cityscapes_seg_dataset import CityscapesSegDataset
from pytorch.datasets.coco_seg_dataset import CocoSegDataset
# import torchvision.datasets as datasets
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Register segmentation dataset (VOC2012/ADE20K/Cityscapes/COCO)
    command line options on the given argparse parser.

    Raises Exception for an unrecognized `dataset_name`.
    """
    # dataset name -> (default data dir, human-readable title, class count)
    dataset_specs = {
        "VOC": ('../imgclsmob_data/voc', 'Pascal VOC2012', 21),
        "ADE20K": ('../imgclsmob_data/ade20k', 'ADE20K', 150),
        "Cityscapes": ('../imgclsmob_data/cityscapes', 'Cityscapes', 19),
        "COCO": ('../imgclsmob_data/coco', 'COCO', 21),
    }
    if dataset_name not in dataset_specs:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    default_dir, title, class_count = dataset_specs[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=default_dir,
        help='path to directory with {} dataset'.format(title))
    parser.add_argument(
        '--num-classes',
        type=int,
        default=class_count,
        help='number of classes')
    # Options shared by all segmentation datasets.
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
    parser.add_argument(
        '--image-base-size',
        type=int,
        default=520,
        help='base image size')
    parser.add_argument(
        '--image-crop-size',
        type=int,
        default=480,
        help='crop image size')
def get_metainfo(dataset_name):
    """
    Return label-handling metadata (vague/background index flags) for the
    requested segmentation dataset.

    Raises Exception for an unrecognized `dataset_name`.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset_class = dataset_classes[dataset_name]
    return {
        "vague_idx": dataset_class.vague_idx,
        "use_vague": dataset_class.use_vague,
        "background_idx": dataset_class.background_idx,
        "ignore_bg": dataset_class.ignore_bg}
def get_test_data_loader(dataset_name,
                         dataset_dir,
                         batch_size,
                         num_workers):
    """
    Create a test DataLoader for a segmentation dataset
    (VOC2012/ADE20K/Cityscapes/COCO).

    Parameters:
    ----------
    dataset_name : str
        One of 'VOC', 'ADE20K', 'Cityscapes' or 'COCO'.
    dataset_dir : str
        Path to the dataset root directory.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    torch.utils.data.DataLoader
        Test data loader (no shuffling).

    Raises Exception for an unrecognized `dataset_name`.
    """
    # ImageNet RGB statistics reused for normalization.
    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb),
    ])
    if dataset_name == "VOC":
        dataset_class = VOCSegDataset
    elif dataset_name == "ADE20K":
        dataset_class = ADE20KSegDataset
    elif dataset_name == "Cityscapes":
        dataset_class = CityscapesSegDataset
    elif dataset_name == "COCO":
        dataset_class = CocoSegDataset
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset = dataset_class(
        root=dataset_dir,
        mode="test",
        transform=transform_val)
    val_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
    return val_loader
def validate1(accuracy_metrics,
              net,
              val_data,
              use_cuda):
    """
    Evaluate `net` over `val_data` and update every metric in
    `accuracy_metrics`.

    Parameters:
    ----------
    accuracy_metrics : list
        Metric objects exposing reset()/update(target, output)/get().
    net : torch.nn.Module
        Network to evaluate (switched to eval mode here).
    val_data : iterable
        Iterable yielding (data, target) batches.
    use_cuda : bool
        Whether to move targets to the GPU.

    Returns:
    -------
    list
        The result of `metric.get()` for each metric, in order.
    """
    net.eval()
    for metric in accuracy_metrics:
        metric.reset()
    with torch.no_grad():
        for data, target in tqdm(val_data):
            if use_cuda:
                target = target.cuda(non_blocking=True)
            # NOTE(review): `data` is not moved to the GPU here — presumably
            # the network (e.g. DataParallel) handles placement; confirm.
            output = net(data)
            for metric in accuracy_metrics:
                metric.update(target, output)
    accuracy_info = [metric.get() for metric in accuracy_metrics]
    return accuracy_info
| 5,401 | 31.347305 | 95 | py |
imgclsmob | imgclsmob-master/tensorflow_/utils_tp.py | import math
import logging
import os
import multiprocessing
import numpy as np
import cv2
import tensorflow as tf
from tensorpack.models import regularize_cost
from tensorpack.tfutils.summary import add_moving_summary
# from tensorpack.tfutils.summary import add_tensor_summary
from tensorpack import ModelDesc, get_current_tower_context
from tensorpack import InputDesc, PlaceholderInput, TowerContext
from tensorpack.tfutils import get_model_loader, model_utils
# from tensorpack.tfutils import get_default_sess_config
from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData
# from tensorpack.dataflow import PrefetchData
from tensorpack.dataflow import MultiThreadMapData
# from tensorpack.dataflow import MapData
from tensorpack.utils import logger
from .tensorflowcv.model_provider import get_model
from .tensorflowcv.models.common import is_channels_first
class CachedChiefSessionCreator(tf.train.ChiefSessionCreator):
    """
    ChiefSessionCreator that creates the TensorFlow session only once and
    returns the same cached instance on every later `create_session` call.
    """
    def __init__(self,
                 scaffold=None,
                 master="",
                 config=None,
                 checkpoint_dir=None,
                 checkpoint_filename_with_path=None):
        super(CachedChiefSessionCreator, self).__init__(
            scaffold=scaffold,
            master=master,
            config=config,
            checkpoint_dir=checkpoint_dir,
            checkpoint_filename_with_path=checkpoint_filename_with_path)
        # Lazily created session, reused across create_session() calls.
        self.cached_sess = None
    def create_session(self):
        """
        Create the underlying session on first call; afterwards return the
        cached session.
        """
        if self.cached_sess is None:
            self.cached_sess = super(CachedChiefSessionCreator, self).create_session()
        return self.cached_sess
class ImageNetModel(ModelDesc):
    """
    Tensorpack ModelDesc that wraps a model-building callable
    (`model_lambda`) into an ImageNet classification training graph with
    preprocessing, cross-entropy loss, L2 weight decay and top-1/top-5
    error summaries.
    """
    def __init__(self,
                 model_lambda,
                 image_size=224,
                 data_format="channels_last",
                 **kwargs):
        super(ImageNetModel, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.model_lambda = model_lambda
        self.image_size = image_size
        self.image_dtype = tf.float32
        self.data_format = data_format
        self.label_smoothing = 0.0
        self.loss_scale = 1.0
        self.weight_decay = 1e-4
        """
        Whether the image is BGR or RGB. If using DataFlow, then it should be BGR.
        """
        self.image_bgr = False
        """
        To apply on normalization parameters, use '.*/W|.*/gamma|.*/beta'
        """
        self.weight_decay_pattern = ".*/kernel"
    def inputs(self):
        """Declare the graph inputs: NHWC image batch and int32 labels."""
        return [tf.placeholder(self.image_dtype, (None, self.image_size, self.image_size, 3), "input"),
                tf.placeholder(tf.int32, (None,), "label")]
    def build_graph(self,
                    image,
                    label):
        """
        Build the training/inference graph and return the total cost
        (cross-entropy plus optional weight decay, optionally scaled).
        """
        image = self.image_preprocess(image)
        # Inputs always arrive as NHWC; transpose to NCHW if requested.
        if is_channels_first(self.data_format):
            image = tf.transpose(image, [0, 3, 1, 2], name="image_transpose")
        # tf.summary.image('input_image_', image)
        # tf.summary.tensor_summary('input_tensor_', image)
        # with tf.name_scope('tmp1_summaries'):
        #     add_tensor_summary(image, ['histogram', 'rms', 'sparsity'], name='tmp1_tensor')
        is_training = get_current_tower_context().is_training
        logits = self.model_lambda(
            x=image,
            training=is_training)
        loss = ImageNetModel.compute_loss_and_error(
            logits=logits,
            label=label,
            label_smoothing=self.label_smoothing)
        if self.weight_decay > 0:
            wd_loss = regularize_cost(
                regex=self.weight_decay_pattern,
                func=tf.contrib.layers.l2_regularizer(self.weight_decay),
                name="l2_regularize_loss")
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name="cost")
        else:
            total_cost = tf.identity(loss, name="cost")
            # NOTE(review): the total-cost summary is only added on this
            # branch (no weight decay) — confirm this asymmetry is intended.
            add_moving_summary(total_cost)
        if self.loss_scale != 1.0:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost
    def optimizer(self):
        """Return Nesterov momentum SGD driven by a `learning_rate` variable."""
        lr = tf.get_variable("learning_rate", initializer=0.1, trainable=False)
        tf.summary.scalar("learning_rate-summary", lr)
        return tf.train.MomentumOptimizer(
            learning_rate=lr,
            momentum=0.9,
            use_nesterov=True)
    def image_preprocess(self,
                         image):
        """
        Cast the image to float32 and normalize with ImageNet mean/std
        (channel order flipped when `self.image_bgr` is set).
        """
        with tf.name_scope("image_preprocess"):
            if image.dtype.base_dtype != tf.float32:
                image = tf.cast(image, tf.float32)
            mean = np.array([0.485, 0.456, 0.406], np.float32) * 255.0  # rgb
            std = np.array([0.229, 0.224, 0.225], np.float32) * 255.0
            if self.image_bgr:
                mean = mean[::-1]
                std = std[::-1]
            image_mean = tf.constant(mean, dtype=tf.float32)
            image_std = tf.constant(std, dtype=tf.float32)
            image = (image - image_mean) / image_std
        return image
    @staticmethod
    def compute_loss_and_error(logits,
                               label,
                               label_smoothing=0.0):
        """
        Return the mean cross-entropy loss (with optional label smoothing)
        and register top-1/top-5 training-error moving summaries.
        """
        if label_smoothing == 0.0:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits,
                labels=label)
        else:
            num_classes = logits.shape[-1]
            loss = tf.losses.softmax_cross_entropy(
                onehot_labels=tf.one_hot(label, num_classes),
                logits=logits,
                label_smoothing=label_smoothing)
        loss = tf.reduce_mean(loss, name="xentropy-loss")
        def prediction_incorrect(logits, label, topk=1, name="incorrect_vector"):
            # 1.0 where the true label is NOT in the top-k predictions.
            with tf.name_scope("prediction_incorrect"):
                x = tf.logical_not(tf.nn.in_top_k(predictions=logits, targets=label, k=topk))
                return tf.cast(x, tf.float32, name=name)
        error_top1 = prediction_incorrect(logits, label, topk=1, name="wrong-top1")
        add_moving_summary(tf.reduce_mean(error_top1, name="train-error-top1"))
        error_top5 = prediction_incorrect(logits, label, topk=5, name="wrong-top5")
        add_moving_summary(tf.reduce_mean(error_top5, name="train-error-top5"))
        return loss
class GoogleNetResize(imgaug.ImageAugmentor):
    """
    crop 8%~100% of the original image
    See `Going Deeper with Convolutions` by Google.
    """
    def __init__(self,
                 crop_area_fraction=0.08,
                 aspect_ratio_low=0.75,
                 aspect_ratio_high=1.333,
                 target_shape=224):
        # tensorpack idiom: store all constructor args as attributes.
        self._init(locals())
    def _augment(self, img, _):
        """Sample a random crop (area/aspect-ratio jittered) and resize it."""
        h, w = img.shape[:2]
        area = h * w
        # Try up to 10 random (area, aspect ratio) proposals.
        for _ in range(10):
            targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
            aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
            ww = int(np.sqrt(targetArea * aspectR) + 0.5)
            hh = int(np.sqrt(targetArea / aspectR) + 0.5)
            if self.rng.uniform() < 0.5:
                ww, hh = hh, ww
            if hh <= h and ww <= w:
                x1 = 0 if w == ww else self.rng.randint(0, w - ww)
                y1 = 0 if h == hh else self.rng.randint(0, h - hh)
                out = img[y1:y1 + hh, x1:x1 + ww]
                out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=cv2.INTER_CUBIC)
                return out
        # Fallback: deterministic resize of the shorter edge + center crop.
        out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
        out = imgaug.CenterCrop(self.target_shape).augment(out)
        return out
def get_imagenet_dataflow(datadir,
                          is_train,
                          batch_size,
                          augmentors,
                          parallel=None):
    """
    Build a tensorpack DataFlow over ILSVRC12 (train or val split) with the
    given augmentors, parallel workers, prefetching and batching.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert datadir is not None
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if is_train:
        ds = dataset.ILSVRC12(datadir, "train", shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logging.warning("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, "val", shuffle=False)
        aug = imgaug.AugmentorList(augmentors)
        def mapf(dp):
            # Decode the image file and apply validation augmentors.
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            # cv2 loads BGR; flip channel axis to get RGB.
            im = np.flip(im, axis=2)
            # print("fname={}".format(fname))
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        # ds = MapData(ds, mapf)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
        # ds = PrefetchData(ds, 1)
    return ds
def prepare_tf_context(num_gpus,
                       batch_size):
    """
    Scale the per-device batch size by the number of devices in use.

    Parameters:
    ----------
    num_gpus : int
        Number of GPUs (0 means CPU-only, treated as a single device).
    batch_size : int
        Per-device batch size.

    Returns:
    -------
    int
        Effective global batch size.
    """
    device_count = max(1, num_gpus)
    return batch_size * device_count
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path,
                  data_format="channels_last"):
    """
    Instantiate the requested network, wrap it in ImageNetModel, and
    optionally prepare a loader for pretrained weights.

    Parameters:
    ----------
    model_name : str
        Name of the model to build via the model provider.
    use_pretrained : bool
        Whether to load pretrained weights.
    pretrained_model_file_path : str
        Explicit checkpoint path; if empty and `use_pretrained` is set,
        the model's own downloaded file path is used.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    tuple
        (ImageNetModel instance, tensorpack session-init loader or None).
    """
    kwargs = {"pretrained": use_pretrained}
    raw_net = get_model(
        name=model_name,
        data_format=data_format,
        **kwargs)
    # Fall back to 224 when the model does not declare its input size.
    input_image_size = raw_net.in_size[0] if hasattr(raw_net, "in_size") else 224
    net = ImageNetModel(
        model_lambda=raw_net,
        image_size=input_image_size,
        data_format=data_format)
    if use_pretrained and not pretrained_model_file_path:
        pretrained_model_file_path = raw_net.file_path
    inputs_desc = None
    if pretrained_model_file_path:
        assert (os.path.isfile(pretrained_model_file_path))
        logging.info("Loading model: {}".format(pretrained_model_file_path))
        inputs_desc = get_model_loader(pretrained_model_file_path)
    return net, inputs_desc
def get_data(is_train,
             batch_size,
             data_dir_path,
             input_image_size=224,
             resize_inv_factor=0.875):
    """
    Build the ImageNet DataFlow with training or validation augmentation.

    Parameters:
    ----------
    is_train : bool
        Whether to build the (augmented, shuffled) training flow.
    batch_size : int
        Batch size.
    data_dir_path : str
        Path to the ILSVRC12 dataset directory.
    input_image_size : int, default 224
        Spatial size of the model input.
    resize_inv_factor : float, default 0.875
        Inverted ratio of the validation center crop relative to the
        resized image.

    Returns:
    -------
    DataFlow
        Batched tensorpack DataFlow.
    """
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    if is_train:
        augmentors = [
            GoogleNetResize(
                crop_area_fraction=0.08,
                target_shape=input_image_size),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                # rgb-bgr conversion for the constants copied from fb.resnet.torch
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([
                        [-0.5675, 0.7192, 0.4009],
                        [-0.5808, -0.0045, -0.8140],
                        [-0.5836, -0.6948, 0.4203]], dtype="float32")[::-1, ::-1])]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            # imgaug.ResizeShortestEdge(resize_value, cv2.INTER_CUBIC),
            imgaug.ResizeShortestEdge(resize_value, cv2.INTER_LINEAR),
            imgaug.CenterCrop((input_image_size, input_image_size))
        ]
    return get_imagenet_dataflow(
        datadir=data_dir_path,
        is_train=is_train,
        batch_size=batch_size,
        augmentors=augmentors)
def calc_flops(model):
    """
    Build the model graph with batch size 1 and print trainable-variable
    and FLOP statistics via the TF profiler.
    """
    # manually build the graph with batch=1
    input_desc = [
        InputDesc(tf.float32, [1, model.image_size, model.image_size, 3], "input"),
        InputDesc(tf.int32, [1], "label")
    ]
    input = PlaceholderInput()
    input.setup(input_desc)
    with TowerContext("", is_training=False):
        model.build_graph(*input.get_input_tensors())
        model_utils.describe_trainable_vars()
        tf.profiler.profile(
            tf.get_default_graph(),
            cmd="op",
            options=tf.profiler.ProfileOptionBuilder.float_operation())
        logger.info("Note that TensorFlow counts flops in a different way from the paper.")
        logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them "
                    "as 1 flop because it can be executed in one instruction.")
| 12,668 | 36.482249 | 108 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenetv2.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
import tensorflow as tf
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, batchnorm, channel_shuffle, maxpool2d,\
se_block, is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNetV2 unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // 2
    if downsample:
        # Downsampling unit: shortcut branch y1 = stride-2 depthwise conv + 1x1 conv.
        y1 = depthwise_conv3x3(
            x=x,
            channels=in_channels,
            strides=2,
            data_format=data_format,
            name=name + "/dw_conv4")
        y1 = batchnorm(
            x=y1,
            training=training,
            data_format=data_format,
            name=name + "/dw_bn4")
        y1 = conv1x1(
            x=y1,
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name=name + "/expand_conv5/conv")
        y1 = batchnorm(
            x=y1,
            training=training,
            data_format=data_format,
            name=name + "/expand_bn5")
        y1 = tf.nn.relu(y1, name=name + "/expand_activ5")
        x2 = x
    else:
        # Regular unit: split channels in half; y1 passes through untouched.
        y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(data_format))
    # Main branch y2: 1x1 compress -> 3x3 depthwise -> 1x1 expand.
    y2 = conv1x1(
        x=x2,
        in_channels=(in_channels if downsample else mid_channels),
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/compress_conv1/conv")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/compress_bn1")
    y2 = tf.nn.relu(y2, name=name + "/compress_activ1")
    y2 = depthwise_conv3x3(
        x=y2,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/dw_bn2")
    y2 = conv1x1(
        x=y2,
        in_channels=mid_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/expand_conv3/conv")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/expand_bn3")
    y2 = tf.nn.relu(y2, name=name + "/expand_activ3")
    if use_se:
        y2 = se_block(
            x=y2,
            channels=mid_channels,
            data_format=data_format,
            name=name + "/se")
    if use_residual and not downsample:
        y2 = y2 + x2
    x = tf.concat([y1, y2], axis=get_channel_axis(data_format), name=name + "/concat")
    assert (mid_channels % 2 == 0)
    # Shuffle the two concatenated halves so channels mix across branches.
    x = channel_shuffle(
        x=x,
        groups=2,
        data_format=data_format)
    return x
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    ShuffleNetV2 specific initial block.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 3x3 conv followed by stride-2 max pool: 4x spatial reduction.
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=0,
        ceil_mode=True,
        data_format=data_format,
        name=name + "/pool")
    return x
class ShuffleNetV2(object):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.use_se = use_se
        self.use_residual = use_residual
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = shuffle_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        # Stack the stages; the first unit of each stage downsamples.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                downsample = (j == 0)
                x = shuffle_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=downsample,
                    use_se=self.use_se,
                    use_residual=self.use_residual,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        # NOTE(review): pool_size=7 assumes a 224x224 input (7x7 final
        # feature map); confirm before using other `in_size` values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_shufflenetv2(width_scale,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Create ShuffleNetV2 model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 24
    final_block_channels = 1024
    # Base (1.0x) configuration: units per stage and per-stage widths.
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        # Only wide variants (>1.5x) also scale the final block width.
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)
    net = ShuffleNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def shufflenetv2_wd2(**kwargs):
    """
    Build the ShuffleNetV2 0.5x network ('ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Callable model-graph constructor with extra attributes.
    """
    width_scale = 12.0 / 29.0
    return get_shufflenetv2(width_scale=width_scale, model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
    """
    Build the ShuffleNetV2 1x network ('ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Callable model-graph constructor with extra attributes.
    """
    width_scale = 1.0
    return get_shufflenetv2(width_scale=width_scale, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
    """
    Build the ShuffleNetV2 1.5x network ('ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Callable model-graph constructor with extra attributes.
    """
    width_scale = 44.0 / 29.0
    return get_shufflenetv2(width_scale=width_scale, model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
    """
    Build the ShuffleNetV2 2x network ('ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Callable model-graph constructor with extra attributes.
    """
    width_scale = 61.0 / 29.0
    return get_shufflenetv2(width_scale=width_scale, model_name="shufflenetv2_w2", **kwargs)
def _test():
    """
    Smoke-test all ShuffleNetV2 variants: build each graph, check the
    trainable-parameter counts, and run one zero-input forward pass.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        shufflenetv2_wd2,
        shufflenetv2_w1,
        shufflenetv2_w3d2,
        shufflenetv2_w2,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts per variant.
        assert (model != shufflenetv2_wd2 or weight_count == 1366792)
        assert (model != shufflenetv2_w1 or weight_count == 2278604)
        assert (model != shufflenetv2_w3d2 or weight_count == 4406098)
        assert (model != shufflenetv2_w2 or weight_count == 7601686)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
if __name__ == "__main__":
_test()
| 14,880 | 29.745868 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/igcv3.py | """
IGCV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle, is_channels_first, flatten
def inv_res_unit(x,
                 in_channels,
                 out_channels,
                 strides,
                 expansion,
                 training,
                 data_format,
                 name="inv_res_unit"):
    """
    IGCV3 inverted residual unit: grouped 1x1 expansion, channel shuffle,
    depthwise 3x3 convolution, and grouped 1x1 projection, with an identity
    shortcut whenever spatial size and channel count are preserved.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the depthwise convolution.
    expansion : bool
        Whether the 1x1 stage expands channels (x6).
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'inv_res_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    use_shortcut = (strides == 1) and (in_channels == out_channels)
    exp_channels = in_channels * 6 if expansion else in_channels
    num_groups = 2
    shortcut = x if use_shortcut else None
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=exp_channels,
        groups=num_groups,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Mix information across the two convolution groups.
    x = channel_shuffle(
        x=x,
        groups=num_groups,
        data_format=data_format)
    x = dwconv3x3_block(
        x=x,
        in_channels=exp_channels,
        out_channels=exp_channels,
        strides=strides,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        in_channels=exp_channels,
        out_channels=out_channels,
        groups=num_groups,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    if shortcut is not None:
        x = x + shortcut
    return x
class IGCV3(object):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Note: this is a TF1-style functor — no layers are created in the
    constructor; the whole graph is built lazily in `__call__`.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IGCV3, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration only; see the class docstring for parameter meanings.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Stem: strided 3x3 conv halves the spatial resolution.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # Only the very first unit of the network skips channel expansion.
                expansion = (i != 0) or (j != 0)
                x = inv_res_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    expansion=expansion,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        # Global average pooling over the final 7x7 feature map.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_igcv3(width_scale,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create IGCV3-D model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    # Group per-layer channel counts into stages: a layer flagged with
    # downsample=1 opens a new stage, otherwise its units extend the current
    # one.  (Replaces an opaque functools.reduce with the equivalent loop.)
    channels = [[]]
    for ci, li, di in zip(channels_per_layers, layers, downsample):
        if di != 0:
            channels.append([ci] * li)
        else:
            channels[-1] = channels[-1] + [ci] * li
    if width_scale != 1.0:
        def make_even(x):
            # Channel counts must stay even: the units split them into 2 groups.
            return x if (x % 2 == 0) else x + 1
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))
    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        # No weights to attach; keep the attributes present for callers.
        net.state_dict = None
        net.file_path = None
    return net
def igcv3_w1(**kwargs):
    """
    IGCV3-D model at full width (1.0x), from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep
    Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(model_name="igcv3_w1", width_scale=1.0, **kwargs)
def igcv3_w3d4(**kwargs):
    """
    IGCV3-D model at 0.75x width, from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep
    Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(model_name="igcv3_w3d4", width_scale=0.75, **kwargs)
def igcv3_wd2(**kwargs):
    """
    IGCV3-D model at half width (0.5x), from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep
    Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(model_name="igcv3_wd2", width_scale=0.5, **kwargs)
def igcv3_wd4(**kwargs):
    """
    IGCV3-D model at quarter width (0.25x), from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep
    Neural Networks,' https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(model_name="igcv3_wd4", width_scale=0.25, **kwargs)
def _test():
    """
    Smoke-test for the IGCV3 factories: build each model graph, verify the
    trainable-parameter count against known totals, and run a zero input
    through a TF1 session to check the output shape.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        igcv3_w1,
        igcv3_w3d4,
        igcv3_wd2,
        igcv3_wd4,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # NCHW vs NHWC placeholder depending on the chosen data format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameters of the graph just built.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Each assert only constrains the model currently under test.
        assert (model != igcv3_w1 or weight_count == 3491688)
        assert (model != igcv3_w3d4 or weight_count == 2638084)
        assert (model != igcv3_wd2 or weight_count == 1985528)
        assert (model != igcv3_wd4 or weight_count == 1534020)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so the next model starts from scratch.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 12,086 | 30.313472 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/preresnet.py | """
PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'preres_block', 'preres_bottleneck_block',
'preres_init_block', 'preres_activation']
import os
import tensorflow as tf
from .common import pre_conv1x1_block, pre_conv3x3_block, conv2d, conv1x1, batchnorm, maxpool2d, is_channels_first,\
flatten
def preres_block(x,
                 in_channels,
                 out_channels,
                 strides,
                 training,
                 data_format,
                 name="preres_block"):
    """
    Simple PreResNet block for residual path in PreResNet unit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_block'
        Block name.

    Returns:
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    x, x_pre_activ = pre_conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        return_preact=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # After conv1 the tensor carries `out_channels` channels, so that is the
    # true input width of conv2.  (The original passed the stale
    # `in_channels`; the TF conv helpers infer the width from the tensor
    # itself, so the built graph is unchanged, but the stated dataflow was
    # wrong — TODO confirm the helper indeed ignores `in_channels`.)
    x = pre_conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x, x_pre_activ
def preres_bottleneck_block(x,
                            in_channels,
                            out_channels,
                            strides,
                            conv1_stride,
                            training,
                            data_format,
                            name="preres_bottleneck_block"):
    """
    PreResNet bottleneck block for residual path in PreResNet unit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_bottleneck_block'
        Block name.

    Returns:
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    # Standard 4x bottleneck compression.
    mid_channels = out_channels // 4
    x, x_pre_activ = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=(strides if conv1_stride else 1),
        return_preact=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # conv2 and conv3 consume the bottleneck-width tensor, so their true
    # input width is `mid_channels`.  (The original passed the stale
    # `in_channels`; the TF conv helpers infer the width from the tensor
    # itself, so the built graph is unchanged, but the stated dataflow was
    # wrong — TODO confirm the helper indeed ignores `in_channels`.)
    x = pre_conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(1 if conv1_stride else strides),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = pre_conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return x, x_pre_activ
def preres_unit(x,
                in_channels,
                out_channels,
                strides,
                bottleneck,
                conv1_stride,
                training,
                data_format,
                name="preres_unit"):
    """
    PreResNet unit: pre-activated residual body plus a shortcut, which is a
    1x1 projection of the pre-activated input whenever the channel count or
    spatial size changes, and the plain identity otherwise.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    shortcut = x
    body_kwargs = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        training=training,
        data_format=data_format,
        name=name + "/body")
    if bottleneck:
        x, x_pre_activ = preres_bottleneck_block(conv1_stride=conv1_stride, **body_kwargs)
    else:
        x, x_pre_activ = preres_block(**body_kwargs)
    if (in_channels != out_channels) or (strides != 1):
        # Project the *pre-activated* input, as prescribed by the paper.
        shortcut = conv1x1(
            x=x_pre_activ,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name=name + "/identity_conv/conv")
    return x + shortcut
def preres_init_block(x,
                      in_channels,
                      out_channels,
                      training,
                      data_format,
                      name="preres_init_block"):
    """
    PreResNet stem: strided 7x7 convolution, batch norm, ReLU, then a strided
    3x3 max pool — together reducing the spatial size by 4x.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    y = tf.nn.relu(y, name=name + "/activ")
    return maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
def preres_activation(x,
                      training,
                      data_format,
                      name="preres_activation"):
    """
    Trailing pre-activation stage (batch norm + ReLU, no convolution), used
    as the final block of the PreResNet feature extractor.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_activation'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    return tf.nn.relu(out, name=name + "/activ")
class PreResNet(object):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Note: this is a TF1-style functor — no layers are created in the
    constructor; the whole graph is built lazily in `__call__`.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PreResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration only; see the class docstring for parameter meanings.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = preres_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = preres_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Final BN+ReLU: pre-activation nets end with a bare activation stage.
        x = preres_activation(
            x=x,
            training=training,
            data_format=self.data_format,
            name="features/post_activ")
        # Global average pooling over the final 7x7 feature map.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depth -> per-stage unit counts for configurations that do not depend on
    # the block type.
    layers_per_blocks = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths 14/26/38 have distinct layouts depending on the block type
    # (a bottleneck block contributes 3 layers vs. 2 for a simple block).
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in layers_per_blocks:
        layers = layers_per_blocks[blocks]
    else:
        raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: layer counts must reproduce the advertised depth
    # (+2 accounts for the stem conv and the classifier).
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale all widths except the very last unit, which must keep its
        # channel count for the classifier.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        # No weights to attach; keep the attributes present for callers.
        net.state_dict = None
        net.file_path = None
    return net
def preresnet10(**kwargs):
    """
    PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet10", blocks=10, **kwargs)
def preresnet12(**kwargs):
    """
    PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet12", blocks=12, **kwargs)
def preresnet14(**kwargs):
    """
    PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet14", blocks=14, **kwargs)
def preresnetbc14b(**kwargs):
    """
    PreResNet-BC-14b model (bottleneck-compressed, stride on the 3x3 conv) from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnetbc14b", blocks=14, bottleneck=True, conv1_stride=False, **kwargs)
def preresnet16(**kwargs):
    """
    PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet16", blocks=16, **kwargs)
def preresnet18_wd4(**kwargs):
    """
    PreResNet-18 model scaled to 0.25 of the base width, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet18_wd4", blocks=18, width_scale=0.25, **kwargs)
def preresnet18_wd2(**kwargs):
    """
    PreResNet-18 model scaled to 0.5 of the base width, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet18_wd2", blocks=18, width_scale=0.5, **kwargs)
def preresnet18_w3d4(**kwargs):
    """
    PreResNet-18 model scaled to 0.75 of the base width, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)
def preresnet18(**kwargs):
    """
    PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet18", blocks=18, **kwargs)
def preresnet26(**kwargs):
    """
    PreResNet-26 model (simple blocks) from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet26", blocks=26, bottleneck=False, **kwargs)
def preresnetbc26b(**kwargs):
    """
    PreResNet-BC-26b model (bottleneck-compressed, stride on the 3x3 conv) from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)
def preresnet34(**kwargs):
    """
    PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet34", blocks=34, **kwargs)
def preresnetbc38b(**kwargs):
    """
    PreResNet-BC-38b model (bottleneck-compressed, stride on the 3x3 conv) from 'Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027. Experimental configuration.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)
def preresnet50(**kwargs):
    """
    PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet50", blocks=50, **kwargs)
def preresnet50b(**kwargs):
    """
    PreResNet-50b model — the stride moves to the second (3x3) convolution of the bottleneck — from
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet50b", blocks=50, conv1_stride=False, **kwargs)
def preresnet101(**kwargs):
    """
    PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet101", blocks=101, **kwargs)
def preresnet101b(**kwargs):
    """
    PreResNet-101b model — the stride moves to the second (3x3) convolution of the bottleneck — from
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(model_name="preresnet101b", blocks=101, conv1_stride=False, **kwargs)
def preresnet152(**kwargs):
"""
PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
def preresnet152b(**kwargs):
"""
PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
def preresnet200(**kwargs):
"""
PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
def preresnet200b(**kwargs):
"""
PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
def preresnet269b(**kwargs):
    """
    Constructs the PreResNet-269 model, variant 'b' (the stride is applied at the second convolution of the
    bottleneck block), from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # A duplicated keyword in `kwargs` still raises TypeError, as a direct call would.
    config = dict(blocks=269, conv1_stride=False, model_name="preresnet269b")
    return get_preresnet(**config, **kwargs)
def _test():
    # Smoke test: build every PreResNet variant, check its trainable-parameter
    # count against the reference value, and push a single zero-valued image
    # through the graph to verify the (1, 1000) logits shape.
    # Uses the TF1 graph/session API (tf.placeholder, tf.Session).
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        preresnet10,
        preresnet12,
        preresnet14,
        preresnetbc14b,
        preresnet16,
        preresnet18_wd4,
        preresnet18_wd2,
        preresnet18_w3d4,
        preresnet18,
        preresnet26,
        preresnetbc26b,
        preresnet34,
        preresnetbc38b,
        preresnet50,
        preresnet50b,
        preresnet101,
        preresnet101b,
        preresnet152,
        preresnet152b,
        preresnet200,
        preresnet200b,
        preresnet269b,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Placeholder layout depends on the data format (NCHW vs NHWC).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != preresnet10 or weight_count == 5417128)
        assert (model != preresnet12 or weight_count == 5491112)
        assert (model != preresnet14 or weight_count == 5786536)
        assert (model != preresnetbc14b or weight_count == 10057384)
        assert (model != preresnet16 or weight_count == 6967208)
        assert (model != preresnet18_wd4 or weight_count == 3935960)
        assert (model != preresnet18_wd2 or weight_count == 5802440)
        assert (model != preresnet18_w3d4 or weight_count == 8473784)
        assert (model != preresnet18 or weight_count == 11687848)
        assert (model != preresnet26 or weight_count == 17958568)
        assert (model != preresnetbc26b or weight_count == 15987624)
        assert (model != preresnet34 or weight_count == 21796008)
        assert (model != preresnetbc38b or weight_count == 21917864)
        assert (model != preresnet50 or weight_count == 25549480)
        assert (model != preresnet50b or weight_count == 25549480)
        assert (model != preresnet101 or weight_count == 44541608)
        assert (model != preresnet101b or weight_count == 44541608)
        assert (model != preresnet152 or weight_count == 60185256)
        assert (model != preresnet152b or weight_count == 60185256)
        assert (model != preresnet200 or weight_count == 64666280)
        assert (model != preresnet200b or weight_count == 64666280)
        assert (model != preresnet269b or weight_count == 102065832)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    # Run the smoke tests when the module is executed as a script.
    _test()
| 31,739 | 30.645065 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenetv2b.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow. The alternative variant.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle, channel_shuffle2, maxpool2d,\
se_block, is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 shuffle_group_first,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNetV2(b) unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    shuffle_group_first : bool
        Whether to use channel shuffle in group first mode.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Bottleneck width of the transform branch.
    mid_channels = out_channels // 2
    in_channels2 = in_channels // 2
    assert (in_channels % 2 == 0)
    if downsample:
        # Downsampling unit: no channel split; the shortcut branch shrinks the
        # whole input spatially via a stride-2 depthwise conv plus a 1x1 conv.
        y1 = dwconv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/shortcut_dconv")
        y1 = conv1x1_block(
            x=y1,
            in_channels=in_channels,
            out_channels=in_channels,
            training=training,
            data_format=data_format,
            name=name + "/shortcut_conv")
        x2 = x
    else:
        # Regular unit: split the channels in half -- y1 passes through
        # untouched, x2 feeds the transform branch.
        y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(data_format))
    y2_in_channels = (in_channels if downsample else in_channels2)
    y2_out_channels = out_channels - y2_in_channels
    # Transform branch: 1x1 conv -> (optionally strided) depthwise 3x3 -> 1x1 conv.
    y2 = conv1x1_block(
        x=x2,
        in_channels=y2_in_channels,
        out_channels=mid_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y2 = dwconv3x3_block(
        x=y2,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(2 if downsample else 1),
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/dconv")
    y2 = conv1x1_block(
        x=y2,
        in_channels=mid_channels,
        out_channels=y2_out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    if use_se:
        y2 = se_block(
            x=y2,
            channels=y2_out_channels,
            data_format=data_format,
            name=name + "/se")
    if use_residual and not downsample:
        # A residual add is only shape-compatible when no downsampling happened.
        assert (y2_out_channels == in_channels2)
        y2 = y2 + x2
    x = tf.concat([y1, y2], axis=get_channel_axis(data_format), name=name + "/concat")
    assert (out_channels % 2 == 0)
    # Mix information across the two branches by shuffling channels; the two
    # helpers differ only in the reshape order (groups-first vs not).
    if shuffle_group_first:
        x = channel_shuffle(
            x=x,
            groups=2,
            data_format=data_format)
    else:
        x = channel_shuffle2(
            x=x,
            groups=2,
            data_format=data_format)
    return x
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    ShuffleNetV2(b) specific initial block.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 3x3 conv followed by stride-2 max-pooling: 4x total spatial
    # downsampling before the first stage.
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        ceil_mode=False,
        data_format=data_format,
        name=name + "/pool")
    return x
class ShuffleNetV2b(object):
    """
    ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 shuffle_group_first=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2b, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only configuration is stored here; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.use_se = use_se
        self.use_residual = use_residual
        self.shuffle_group_first = shuffle_group_first
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = shuffle_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage downsamples spatially.
                downsample = (j == 0)
                x = shuffle_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=downsample,
                    use_se=self.use_se,
                    use_residual=self.use_residual,
                    shuffle_group_first=self.shuffle_group_first,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        # pool_size=7 matches the final feature map from the default 224x224 input.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_shufflenetv2b(width_scale,
                      shuffle_group_first=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".tensorflow", "models"),
                      **kwargs):
    """
    Create ShuffleNetV2(b) model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Reference (1.0x) configuration: three stages of 4/8/4 units.
    init_block_channels = 24
    final_block_channels = 1024
    stage_units = [4, 8, 4]
    stage_widths = [116, 232, 464]

    channels = [[width] * units for width, units in zip(stage_widths, stage_units)]
    if width_scale != 1.0:
        # Scale every unit width; the final 1x1 block is widened only for
        # scale factors above 1.5.
        channels = [[int(width * width_scale) for width in stage] for stage in channels]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2b(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        shuffle_group_first=shuffle_group_first,
        **kwargs)

    # Default: no pretrained weights attached.
    net.state_dict = None
    net.file_path = None
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def shufflenetv2b_wd2(**kwargs):
    """
    Constructs the ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(
        width_scale=(12.0 / 29.0),
        shuffle_group_first=True,
        model_name="shufflenetv2b_wd2")
    return get_shufflenetv2b(**config, **kwargs)
def shufflenetv2b_w1(**kwargs):
    """
    Constructs the ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(
        width_scale=1.0,
        shuffle_group_first=True,
        model_name="shufflenetv2b_w1")
    return get_shufflenetv2b(**config, **kwargs)
def shufflenetv2b_w3d2(**kwargs):
    """
    Constructs the ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(
        width_scale=(44.0 / 29.0),
        shuffle_group_first=True,
        model_name="shufflenetv2b_w3d2")
    return get_shufflenetv2b(**config, **kwargs)
def shufflenetv2b_w2(**kwargs):
    """
    Constructs the ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(
        width_scale=(61.0 / 29.0),
        shuffle_group_first=True,
        model_name="shufflenetv2b_w2")
    return get_shufflenetv2b(**config, **kwargs)
def _test():
    # Smoke test: build each ShuffleNetV2(b) width variant, verify its
    # trainable-parameter count, and run one zero-valued image through the
    # graph to check the (1, 1000) logits shape (TF1 graph/session API).
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        shufflenetv2b_wd2,
        shufflenetv2b_w1,
        shufflenetv2b_w3d2,
        shufflenetv2b_w2,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Placeholder layout depends on the data format (NCHW vs NHWC).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != shufflenetv2b_wd2 or weight_count == 1366792)
        assert (model != shufflenetv2b_w1 or weight_count == 2279760)
        assert (model != shufflenetv2b_w3d2 or weight_count == 4410194)
        assert (model != shufflenetv2b_w2 or weight_count == 7611290)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    # Run the smoke tests when the module is executed as a script.
    _test()
| 15,582 | 29.980119 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/menet.py | """
MENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
import tensorflow as tf
from .common import conv2d, conv1x1, conv3x3, depthwise_conv3x3, batchnorm, channel_shuffle, maxpool2d, avgpool2d,\
is_channels_first, get_channel_axis, flatten
def me_unit(x,
            in_channels,
            out_channels,
            side_channels,
            groups,
            downsample,
            ignore_group,
            training,
            data_format,
            name="me_unit"):
    """
    MENet unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    side_channels : int
        Number of side channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'me_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Bottleneck width of the main path.
    mid_channels = out_channels // 4
    if downsample:
        # In downsampling units the identity path is concatenated to the output,
        # so the conv path only needs to produce the remaining channels.
        out_channels -= in_channels
    identity = x
    # pointwise group convolution 1
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=(1 if ignore_group else groups),
        data_format=data_format,
        name=name + "/compress_conv1")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/compress_bn1")
    x = tf.nn.relu(x, name=name + "/compress_activ")
    assert (mid_channels % groups == 0)
    x = channel_shuffle(
        x=x,
        groups=groups,
        data_format=data_format)
    # merging: compress the shuffled features into a narrow side branch
    y = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=side_channels,
        data_format=data_format,
        name=name + "/s_merge_conv/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_merge_bn")
    y = tf.nn.relu(y, name=name + "/s_merge_activ")
    # depthwise convolution (bottleneck)
    x = depthwise_conv3x3(
        x=x,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/dw_bn2")
    # evolution: the side branch mirrors the (possibly strided) spatial transform
    y = conv3x3(
        x=y,
        in_channels=side_channels,
        out_channels=side_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/s_conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_conv_bn")
    y = tf.nn.relu(y, name=name + "/s_conv_activ")
    y = conv1x1(
        x=y,
        in_channels=side_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/s_evolve_conv/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_evolve_bn")
    y = tf.nn.sigmoid(y, name=name + "/s_evolve_activ")
    # Element-wise gating of the bottleneck features by the sigmoid-activated
    # side branch.
    x = x * y
    # pointwise group convolution 2
    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        data_format=data_format,
        name=name + "/expand_conv3")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/expand_bn3")
    if downsample:
        # Average-pool the identity to match the reduced spatial size, then
        # concatenate it with the conv path along the channel axis.
        identity = avgpool2d(
            x=identity,
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name=name + "/avgpool")
        x = tf.concat([x, identity], axis=get_channel_axis(data_format), name=name + "/concat")
    else:
        x = x + identity
    x = tf.nn.relu(x, name=name + "/final_activ")
    return x
def me_init_block(x,
                  in_channels,
                  out_channels,
                  training,
                  data_format,
                  name="me_init_block"):
    """
    MENet specific initial block.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'me_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 3x3 conv + BN + ReLU followed by stride-2 max-pooling:
    # 4x total spatial downsampling before the first stage.
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=2,
        padding=1,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    x = tf.nn.relu(x, name=name + "/activ")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return x
class MENet(object):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MENet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only configuration is stored here; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.side_channels = side_channels
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = me_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of each stage downsamples; only the very first
                # unit of the network skips grouping in its 1x1 conv (see me_unit).
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                x = me_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    side_channels=self.side_channels,
                    groups=self.groups,
                    downsample=downsample,
                    ignore_group=ignore_group,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # pool_size=7 matches the final feature map from the default 224x224 input.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create MENet model with specific parameters.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-width configuration: first-stage width -> (init-block channels,
    # per-stage unit widths). Later stages double the preceding stage's width.
    configs = {
        108: (12, [108, 216, 432]),
        128: (12, [128, 256, 512]),
        160: (16, [160, 320, 640]),
        228: (24, [228, 456, 912]),
        256: (24, [256, 512, 1024]),
        348: (24, [348, 696, 1392]),
        352: (24, [352, 704, 1408]),
        456: (48, [456, 912, 1824]),
    }
    if first_stage_channels not in configs:
        raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
    init_block_channels, channels_per_layers = configs[first_stage_channels]

    # Three stages of 4/8/4 units.
    layers = [4, 8, 4]
    channels = [[width] * units for width, units in zip(channels_per_layers, layers)]

    net = MENet(
        channels=channels,
        init_block_channels=init_block_channels,
        side_channels=side_channels,
        groups=groups,
        **kwargs)

    # Default: no pretrained weights attached.
    net.state_dict = None
    net.file_path = None
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def menet108_8x1_g3(**kwargs):
    """
    Constructs the 108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3")
    return get_menet(**config, **kwargs)
def menet128_8x1_g4(**kwargs):
    """
    Constructs the 128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4")
    return get_menet(**config, **kwargs)
def menet160_8x1_g8(**kwargs):
    """
    Constructs the 160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8")
    return get_menet(**config, **kwargs)
def menet228_12x1_g3(**kwargs):
    """
    Constructs the 228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3")
    return get_menet(**config, **kwargs)
def menet256_12x1_g4(**kwargs):
    """
    Constructs the 256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4")
    return get_menet(**config, **kwargs)
def menet348_12x1_g3(**kwargs):
    """
    Constructs the 348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3")
    return get_menet(**config, **kwargs)
def menet352_12x1_g8(**kwargs):
    """
    Constructs the 352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8")
    return get_menet(**config, **kwargs)
def menet456_24x1_g3(**kwargs):
    """
    Constructs the 456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    config = dict(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3")
    return get_menet(**config, **kwargs)
def _test():
    # Smoke test: build each MENet variant, verify its trainable-parameter
    # count against the reference value, and run one zero-valued image through
    # the graph to check the (1, 1000) logits shape (TF1 graph/session API).
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        menet108_8x1_g3,
        menet128_8x1_g4,
        menet160_8x1_g8,
        menet228_12x1_g3,
        menet256_12x1_g4,
        menet348_12x1_g3,
        menet352_12x1_g8,
        menet456_24x1_g3,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Placeholder layout depends on the data format (NCHW vs NHWC).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != menet108_8x1_g3 or weight_count == 654516)
        assert (model != menet128_8x1_g4 or weight_count == 750796)
        assert (model != menet160_8x1_g8 or weight_count == 850120)
        assert (model != menet228_12x1_g3 or weight_count == 1806568)
        assert (model != menet256_12x1_g4 or weight_count == 1888240)
        assert (model != menet348_12x1_g3 or weight_count == 3368128)
        assert (model != menet352_12x1_g8 or weight_count == 2272872)
        assert (model != menet456_24x1_g3 or weight_count == 5304784)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    # Run the smoke tests when the module is executed as a script.
    _test()
| 19,323 | 29.86901 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/channelnet.py | """
ChannelNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,'
https://arxiv.org/abs/1809.01330.
"""
__all__ = ['ChannelNet', 'channelnet']
import os
import tensorflow as tf
from .common import conv2d, batchnorm, is_channels_first, get_channel_axis, flatten
def dwconv3x3(x,
              in_channels,
              out_channels,
              strides,
              use_bias=False,
              data_format="channels_last",
              name="dwconv3x3"):
    """
    3x3 depthwise version of the standard convolution layer.

    The depthwise convolution is expressed as a grouped convolution whose group
    count equals the number of output channels, so every channel is filtered
    independently.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    conv_args = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=out_channels,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
    return conv2d(**conv_args)
def channet_conv(x,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 dropout_rate=0.0,
                 activate=True,
                 training=False,
                 data_format="channels_last",
                 name="channet_conv"):
    """
    ChannelNet specific convolution block with Batch normalization and ReLU6 activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv2d(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=kernel_size, strides=strides, padding=padding,
        dilation=dilation, groups=groups, use_bias=use_bias,
        data_format=data_format, name=name + "/conv")
    # Optional dropout is inserted between the convolution and BatchNorm.
    if dropout_rate > 0.0:
        dropout = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")
        out = dropout(inputs=out, training=training)
    out = batchnorm(
        x=out,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    if not activate:
        return out
    return tf.nn.relu6(out, name=name + "/activ")
def channet_conv1x1(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True,
                    training=False,
                    data_format="channels_last",
                    name="channet_conv1x1"):
    """
    1x1 (pointwise) version of the ChannelNet specific convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv1x1'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # kernel_size=1 with zero padding keeps the spatial resolution.
    return channet_conv(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=strides, padding=0, groups=groups,
        use_bias=use_bias, dropout_rate=dropout_rate, activate=activate,
        training=training, data_format=data_format, name=name)
def channet_conv3x3(x,
                    in_channels,
                    out_channels,
                    strides,
                    padding=1,
                    dilation=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True,
                    training=False,
                    data_format="channels_last",
                    name="channet_conv3x3"):
    """
    3x3 version of the ChannelNet specific convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Thin wrapper that pins the kernel size to 3 and forwards all options.
    return channet_conv(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=3, strides=strides, padding=padding, dilation=dilation,
        groups=groups, use_bias=use_bias, dropout_rate=dropout_rate,
        activate=activate, training=training, data_format=data_format,
        name=name)
def channet_dws_conv_block(x,
                           in_channels,
                           out_channels,
                           strides,
                           groups=1,
                           dropout_rate=0.0,
                           training=False,
                           data_format="channels_last",
                           name="channet_dws_conv_block"):
    """
    ChannelNet specific depthwise separable convolution block with BatchNorms and activations at last convolution
    layers.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    dropout_rate : float, default 0.0
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_dws_conv_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Depthwise stage keeps the channel count and applies the stride.
    out = dwconv3x3(
        x=x, in_channels=in_channels, out_channels=in_channels,
        strides=strides, data_format=data_format, name=name + '/dw_conv')
    # Pointwise stage mixes channels (with optional grouping/dropout).
    return channet_conv1x1(
        x=out, in_channels=in_channels, out_channels=out_channels,
        groups=groups, dropout_rate=dropout_rate, training=training,
        data_format=data_format, name=name + '/pw_conv')
def simple_group_block(x,
                       channels,
                       multi_blocks,
                       groups,
                       dropout_rate,
                       training,
                       data_format,
                       name="simple_group_block"):
    """
    ChannelNet specific block with a sequence of depthwise separable group convolution layers.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'simple_group_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # assert (channels == x.shape[1].value)
    # Channel count is preserved through every sub-block in the chain.
    for block_idx in range(multi_blocks):
        x = channet_dws_conv_block(
            x=x, in_channels=channels, out_channels=channels, strides=1,
            groups=groups, dropout_rate=dropout_rate, training=training,
            data_format=data_format,
            name=name + '/block{}'.format(block_idx + 1))
    return x
def channelwise_conv2d(x,
                       groups,
                       dropout_rate,
                       training=False,
                       data_format="channels_last",
                       name="pure_conv2d"):
    """
    ChannelNet specific block with channel-wise convolution, implemented as a 3D
    convolution that slides over the channel dimension.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups (also the number of 3D filters and the channel stride).
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pure_conv2d'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Insert a singleton axis so channels become the depth dimension of a 5D tensor.
    x = tf.expand_dims(x, axis=get_channel_axis(data_format), name=name + '/expand_dims')
    filters = groups
    # Kernel spans 4*groups consecutive channels; stride `groups` down-samples them.
    kernel_size = [4 * groups, 1, 1]
    strides = [groups, 1, 1]
    x = tf.keras.layers.Conv3D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="same",
        data_format=data_format,
        use_bias=False,
        name=name + '/conv')(x)
    if dropout_rate > 0.0:
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    if filters == 1:
        # Single filter: drop the redundant filter axis to get back to 4D.
        x = tf.squeeze(x, axis=[get_channel_axis(data_format)], name=name + '/squeeze')
    # Fold the filter axis back into the channel axis (for filters == 1 this
    # unstack/concat round-trip is an identity on the channel dimension).
    x = tf.unstack(x, axis=get_channel_axis(data_format), name=name + '/unstack')
    x = tf.concat(x, axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def conv_group_block(x,
                     channels,
                     multi_blocks,
                     groups,
                     dropout_rate,
                     training,
                     data_format,
                     name="conv_group_block"):
    """
    ChannelNet specific block with a combination of channel-wise convolution, depthwise separable group convolutions.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'conv_group_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # NOTE(review): the check reads axis 1, so this block assumes the
    # channels_first layout (the ChannelNet constructor enforces it).
    assert (channels == x.shape[1].value)
    assert (channels % groups == 0)
    out = channelwise_conv2d(
        x=x, groups=groups, dropout_rate=dropout_rate, training=training,
        data_format=data_format, name=name + '/conv')
    return simple_group_block(
        x=out, channels=channels, multi_blocks=multi_blocks, groups=groups,
        dropout_rate=dropout_rate, training=training,
        data_format=data_format, name=name)
def channet_unit(x,
                 in_channels,
                 out_channels_list,
                 strides,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 block_names,
                 merge_type,
                 training,
                 data_format,
                 name="channet_unit"):
    """
    ChannelNet unit: two sub-blocks (selected by name) whose outputs are merged
    sequentially, by addition, or by concatenation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels_list : tuple/list of 2 int
        Number of output channels for each sub-block.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    block_names : tuple/list of 2 str
        Sub-block names.
    merge_type : str
        Type of sub-block output merging.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'channet_unit'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (len(block_names) == 2)
    assert (merge_type in ["seq", "add", "cat"])
    x_outs = []
    for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)):
        # Only the first sub-block applies the (possibly strided) down-sampling.
        strides_i = (strides if i == 0 else 1)
        name_i = name + '/block{}'.format(i + 1)
        # NOTE(review): reads axis 1, i.e. assumes channels_first layout.
        assert (x.shape[1].value == in_channels)
        if block_name == "channet_conv3x3":
            x = channet_conv3x3(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides_i,
                dropout_rate=dropout_rate,
                activate=False,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "channet_dws_conv_block":
            x = channet_dws_conv_block(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides_i,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "simple_group_block":
            # Group blocks preserve the channel count (channels == in_channels).
            x = simple_group_block(
                x=x,
                channels=in_channels,
                multi_blocks=multi_blocks,
                groups=groups,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "conv_group_block":
            x = conv_group_block(
                x=x,
                channels=in_channels,
                multi_blocks=multi_blocks,
                groups=groups,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        else:
            raise NotImplementedError()
        x_outs = x_outs + [x]
        in_channels = out_channels
    if merge_type == "seq":
        # Sequential merge: just keep the last sub-block's output.
        x = x_outs[-1]
    elif merge_type == "add":
        # tf.add takes exactly two operands, matching the two sub-blocks.
        x = tf.add(*x_outs, name=name + '/add')
    elif merge_type == "cat":
        x = tf.concat(x_outs, axis=get_channel_axis(data_format), name=name + '/cat')
    else:
        raise NotImplementedError()
    return x
class ChannelNet(object):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    channels : list of list of list of int
        Number of output channels for each unit.
    block_names : list of list of list of str
        Names of blocks for each unit.
    merge_types : list of list of str
        Merge types for each unit.
    dropout_rate : float, default 0.0001
        Dropout rate.
    multi_blocks : int, default 2
        Block count architectural parameter.
    groups : int, default 2
        Group count architectural parameter.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors (only 'channels_first' is supported).
    """
    def __init__(self,
                 channels,
                 block_names,
                 merge_types,
                 dropout_rate=0.0001,
                 multi_blocks=2,
                 groups=2,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelNet, self).__init__(**kwargs)
        # assert (data_format in ["channels_last", "channels_first"])
        # Only channels_first is supported: the unit-level shape asserts read axis 1.
        assert (data_format in ["channels_first"])
        self.channels = channels
        self.block_names = block_names
        self.merge_types = merge_types
        self.dropout_rate = dropout_rate
        self.multi_blocks = multi_blocks
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Note: there is no separate init block — the first unit consumes the raw input.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage halves the spatial resolution.
                strides = 2 if (j == 0) else 1
                x = channet_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels_list=out_channels,
                    strides=strides,
                    multi_blocks=self.multi_blocks,
                    groups=self.groups,
                    dropout_rate=self.dropout_rate,
                    block_names=self.block_names[i][j],
                    merge_type=self.merge_types[i][j],
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                # Concatenation merge doubles the channel count; otherwise the
                # last sub-block's output channels carry forward.
                if self.merge_types[i][j] == "cat":
                    in_channels = sum(out_channels)
                else:
                    in_channels = out_channels[-1]
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_channelnet(model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create ChannelNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Architecture description: per-stage unit channels, sub-block kinds and
    # the way the two sub-blocks of each unit are merged.
    channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]]
    block_names = [[["channet_conv3x3", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]]]
    merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]]

    net = ChannelNet(
        channels=channels,
        block_names=block_names,
        merge_types=merge_types,
        **kwargs)

    net.state_dict = None
    net.file_path = None
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def channelnet(**kwargs):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_channelnet(
        model_name="channelnet",
        **kwargs)
def _test():
    """Smoke test: build the model graph, check parameter count and output shape."""
    import numpy as np

    data_format = "channels_first"
    pretrained = False

    models = [
        channelnet,
    ]

    for model in models:

        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        # Verify the trainable parameter count against the reference value.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != channelnet or weight_count == 3875112)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Run one forward pass on a zero image and check the logits shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 24,927 | 30.16 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/squeezenext.py | """
SqueezeNext for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import tensorflow as tf
from .common import maxpool2d, conv_block, conv1x1_block, conv7x7_block, is_channels_first, flatten
def sqnxt_unit(x,
               in_channels,
               out_channels,
               strides,
               training,
               data_format,
               name="sqnxt_unit"):
    """
    SqueezeNext unit: a bottlenecked residual block with separable 1x3/3x1
    convolutions on the residual path.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sqnxt_unit'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Pick the bottleneck reduction and whether the identity path needs a
    # projection to match shape.
    if strides == 2:
        reduction_den, resize_identity = 1, True
    elif in_channels > out_channels:
        reduction_den, resize_identity = 4, True
    else:
        reduction_den, resize_identity = 2, False
    mid1_channels = in_channels // reduction_den
    mid2_channels = in_channels // (2 * reduction_den)

    if resize_identity:
        identity = conv1x1_block(
            x=x, in_channels=in_channels, out_channels=out_channels,
            strides=strides, use_bias=True, training=training,
            data_format=data_format, name=name + "/identity_conv")
    else:
        identity = x

    out = conv1x1_block(
        x=x, in_channels=in_channels, out_channels=mid1_channels,
        strides=strides, use_bias=True, training=training,
        data_format=data_format, name=name + "/conv1")
    out = conv1x1_block(
        x=out, in_channels=mid1_channels, out_channels=mid2_channels,
        use_bias=True, training=training, data_format=data_format,
        name=name + "/conv2")
    # Separable pair: 1x3 followed by 3x1 instead of a full 3x3 convolution.
    out = conv_block(
        x=out, in_channels=mid2_channels, out_channels=mid1_channels,
        kernel_size=(1, 3), strides=1, padding=(0, 1), use_bias=True,
        training=training, data_format=data_format, name=name + "/conv3")
    out = conv_block(
        x=out, in_channels=mid1_channels, out_channels=mid1_channels,
        kernel_size=(3, 1), strides=1, padding=(1, 0), use_bias=True,
        training=training, data_format=data_format, name=name + "/conv4")
    out = conv1x1_block(
        x=out, in_channels=mid1_channels, out_channels=out_channels,
        use_bias=True, training=training, data_format=data_format,
        name=name + "/conv5")

    out = out + identity
    return tf.nn.relu(out, name=name + "/final_activ")
def sqnxt_init_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="sqnxt_init_block"):
    """
    SqueezeNext specific initial block: strided 7x7 convolution followed by max-pooling.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sqnxt_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv7x7_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=2, padding=1, use_bias=True, training=training,
        data_format=data_format, name=name + "/conv")
    # Ceil mode keeps the odd feature-map size expected by later stages.
    return maxpool2d(
        x=out, pool_size=3, strides=2, ceil_mode=True,
        data_format=data_format, name=name + "/pool")
class SqueezeNext(object):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNext, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = sqnxt_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Down-sample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = sqnxt_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Final 1x1 expansion before global pooling.
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            use_bias=True,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNext ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]

    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        # Fix: the error message previously said "SqueezeNet" although this
        # factory builds SqueezeNext models.
        raise ValueError("Unsupported SqueezeNext version {}".format(version))

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)

    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.0,
        model_name="sqnxt23_w1",
        **kwargs)


def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.5,
        model_name="sqnxt23_w3d2",
        **kwargs)


def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23",
        width_scale=2.0,
        model_name="sqnxt23_w2",
        **kwargs)


def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.0,
        model_name="sqnxt23v5_w1",
        **kwargs)


def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.5,
        model_name="sqnxt23v5_w3d2",
        **kwargs)


def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=2.0,
        model_name="sqnxt23v5_w2",
        **kwargs)
def _test():
    """Smoke test: build each model graph, check parameter counts and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    models = [
        sqnxt23_w1,
        sqnxt23_w3d2,
        sqnxt23_w2,
        sqnxt23v5_w1,
        sqnxt23v5_w3d2,
        sqnxt23v5_w2,
    ]

    for model in models:

        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        # Verify the trainable parameter count against reference values.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sqnxt23_w1 or weight_count == 724056)
        assert (model != sqnxt23_w3d2 or weight_count == 1511824)
        assert (model != sqnxt23_w2 or weight_count == 2583752)
        assert (model != sqnxt23v5_w1 or weight_count == 921816)
        assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)
        assert (model != sqnxt23v5_w2 or weight_count == 3366344)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Run one forward pass on a zero image and check the logits shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 15,382 | 29.704591 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/resnet.py | """
ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'res_block',
'res_bottleneck_block', 'res_unit', 'res_init_block']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, conv7x7_block, maxpool2d, is_channels_first, flatten
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def res_block(x,
              in_channels,
              out_channels,
              strides,
              training,
              data_format,
              name="res_block"):
    """
    Simple ResNet block for residual path in ResNet unit: two 3x3 convolutions,
    the second one without activation (applied after the residual addition).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Fix: the second convolution consumes the output of the first, so its
    # declared input channel count is `out_channels`, not `in_channels`.
    x = conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x
def res_bottleneck_block(x,
                         in_channels,
                         out_channels,
                         strides,
                         conv1_stride=False,
                         bottleneck_factor=4,
                         training=False,
                         data_format="channels_last",
                         name="res_bottleneck_block"):
    """
    ResNet bottleneck block for residual path in ResNet unit: 1x1 reduce,
    3x3, then 1x1 expand (no activation on the last conv; the unit applies
    ReLU after the residual add).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'res_bottleneck_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // bottleneck_factor

    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=(strides if conv1_stride else 1),
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = conv3x3_block(
        x=x,
        # Fixed: conv2's input is conv1's output with `mid_channels` channels
        # (was incorrectly `in_channels`).
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(1 if conv1_stride else strides),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        # Fixed: conv3's input is conv2's output with `mid_channels` channels
        # (was incorrectly `in_channels`).
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return x
def res_unit(x,
             in_channels,
             out_channels,
             strides,
             bottleneck,
             conv1_stride,
             training,
             data_format,
             name="res_unit"):
    """
    ResNet unit with residual connection: an identity (or 1x1 projection)
    shortcut added to a simple/bottleneck body, followed by ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A projection shortcut is needed whenever spatial size or channel count changes.
    needs_projection = (in_channels != out_channels) or (strides != 1)
    if needs_projection:
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        shortcut = x

    if bottleneck:
        body = res_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        body = res_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")

    out = body + shortcut
    return tf.nn.relu(out, name=name + "/activ")
def res_init_block(x,
                   in_channels,
                   out_channels,
                   training,
                   data_format,
                   name):
    """
    ResNet specific initial block: a strided 7x7 convolution followed by
    overlapping 3x3 max pooling (overall 4x spatial reduction).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = conv7x7_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    y = maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return y
class ResNet(object):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    The instance is a callable graph builder: construction only stores the
    configuration; calling it on a tensor builds the TF graph.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        y = res_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        current_channels = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                y = res_unit(
                    x=y,
                    in_channels=current_channels,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                current_channels = unit_channels
        y = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(y)
        y = flatten(
            x=y,
            data_format=self.data_format)
        y = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(y)
        return y
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create ResNet or SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. If None, it is
        inferred from the depth (bottleneck for 50 blocks and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Per-stage unit counts for depths with a single configuration.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    # Depths 14/26/38 depend on whether bottleneck blocks are used.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))

    # Sanity check: unit counts must reproduce the requested depth
    # (3 convs per bottleneck unit, 2 per simple unit, plus init/output).
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [ci * 4 for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last stage.
        last_i = len(channels) - 1
        scaled = []
        for i, ci in enumerate(channels):
            last_j = len(ci) - 1
            row = []
            for j, cij in enumerate(ci):
                keep_original = (i == last_i) and (j == last_j)
                row.append(cij if keep_original else int(cij * width_scale))
            scaled.append(row)
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def resnet10(**kwargs):
    """
    ResNet-10 model (an experimental depth) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet10", blocks=10, **kwargs)
def resnet12(**kwargs):
    """
    ResNet-12 model (an experimental depth) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet12", blocks=12, **kwargs)
def resnet14(**kwargs):
    """
    ResNet-14 model (an experimental depth) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet14", blocks=14, **kwargs)
def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model (experimental, bottleneck compressed, stride in the second
    bottleneck conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnetbc14b", blocks=14, bottleneck=True, conv1_stride=False, **kwargs)
def resnet16(**kwargs):
    """
    ResNet-16 model (an experimental depth) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet16", blocks=16, **kwargs)
def resnet18_wd4(**kwargs):
    """
    ResNet-18 model at 0.25 width scale (experimental) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet18_wd4", blocks=18, width_scale=0.25, **kwargs)
def resnet18_wd2(**kwargs):
    """
    ResNet-18 model at 0.5 width scale (experimental) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet18_wd2", blocks=18, width_scale=0.5, **kwargs)
def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model at 0.75 width scale (experimental) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)
def resnet18(**kwargs):
    """
    ResNet-18 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet18", blocks=18, **kwargs)
def resnet26(**kwargs):
    """
    ResNet-26 model with simple (non-bottleneck) units (experimental) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet26", blocks=26, bottleneck=False, **kwargs)
def resnetbc26b(**kwargs):
    """
    ResNet-BC-26b model (experimental, bottleneck compressed, stride in the second
    bottleneck conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)
def resnet34(**kwargs):
    """
    ResNet-34 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet34", blocks=34, **kwargs)
def resnetbc38b(**kwargs):
    """
    ResNet-BC-38b model (experimental, bottleneck compressed, stride in the second
    bottleneck conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)
def resnet50(**kwargs):
    """
    ResNet-50 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet50", blocks=50, **kwargs)
def resnet50b(**kwargs):
    """
    ResNet-50 model with stride in the second convolution of each bottleneck block,
    from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet50b", blocks=50, conv1_stride=False, **kwargs)
def resnet101(**kwargs):
    """
    ResNet-101 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet101", blocks=101, **kwargs)
def resnet101b(**kwargs):
    """
    ResNet-101 model with stride in the second convolution of each bottleneck block,
    from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet101b", blocks=101, conv1_stride=False, **kwargs)
def resnet152(**kwargs):
    """
    ResNet-152 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet152", blocks=152, **kwargs)
def resnet152b(**kwargs):
    """
    ResNet-152 model with stride in the second convolution of each bottleneck block,
    from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet152b", blocks=152, conv1_stride=False, **kwargs)
def resnet200(**kwargs):
    """
    ResNet-200 model (an experimental depth) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet200", blocks=200, **kwargs)
def resnet200b(**kwargs):
    """
    ResNet-200 model (experimental) with stride in the second convolution of each
    bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnet(model_name="resnet200b", blocks=200, conv1_stride=False, **kwargs)
def _test():
    """Smoke-test: build each model, check parameter counts and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_weights = {
        resnet10: 5418792,
        resnet12: 5492776,
        resnet14: 5788200,
        resnetbc14b: 10064936,
        resnet16: 6968872,
        resnet18_wd4: 3937400,
        resnet18_wd2: 5804296,
        resnet18_w3d4: 8476056,
        resnet18: 11689512,
        resnet26: 17960232,
        resnetbc26b: 15995176,
        resnet34: 21797672,
        resnetbc38b: 21925416,
        resnet50: 25557032,
        resnet50b: 25557032,
        resnet101: 44549160,
        resnet101b: 44549160,
        resnet152: 60192808,
        resnet152b: 60192808,
        resnet200: 64673832,
        resnet200b: 64673832,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained, data_format=data_format)

        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
if __name__ == "__main__":
_test()
| 29,772 | 29.85285 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenetv2.py | """
MobileNetV2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4']
import os
import tensorflow as tf
from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten
def linear_bottleneck(x,
                      in_channels,
                      out_channels,
                      strides,
                      expansion,
                      training,
                      data_format,
                      name="linear_bottleneck"):
    """
    So-called 'Linear Bottleneck' layer (MobileNetV2 unit): 1x1 expand (ReLU6),
    3x3 depthwise (ReLU6), 1x1 project (linear), with a residual skip when
    shapes are unchanged.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    expansion : bool
        Whether do expansion of channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'linear_bottleneck'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Skip connection only when the unit keeps both resolution and width.
    use_skip = (strides == 1) and (in_channels == out_channels)
    mid_channels = in_channels * 6 if expansion else in_channels
    shortcut = x if use_skip else None

    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = dwconv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")

    if use_skip:
        x = x + shortcut
    return x
class MobileNetV2(object):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
    https://arxiv.org/abs/1801.04381.

    The instance is a callable graph builder: construction only stores the
    configuration; calling it on a tensor builds the TF graph.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV2, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        y = conv3x3_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        current_channels = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first;
                # the very first unit of the network uses no channel expansion.
                unit_strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                use_expansion = (stage_idx != 0) or (unit_idx != 0)
                y = linear_bottleneck(
                    x=y,
                    in_channels=current_channels,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    expansion=use_expansion,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                current_channels = unit_channels
        y = conv1x1_block(
            x=y,
            in_channels=current_channels,
            out_channels=self.final_block_channels,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        y = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(y)
        # The classifier is a 1x1 convolution instead of a dense layer.
        y = conv1x1(
            x=y,
            in_channels=self.final_block_channels,
            out_channels=self.classes,
            use_bias=False,
            data_format=self.data_format,
            name="output")
        return flatten(
            x=y,
            data_format=self.data_format)
def get_mobilenetv2(width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV2 model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 2, 3, 4, 3, 3, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

    # Group units into stages: a layer with downsample == 1 opens a new stage,
    # otherwise its units are appended to the current (last) stage.
    channels = [[]]
    for ci, li, di in zip(channels_per_layers, layers, downsample):
        if di != 0:
            channels.append([ci] * li)
        else:
            channels[-1].extend([ci] * li)

    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        # The final 1x1 block is only widened, never narrowed.
        if width_scale > 1.0:
            final_block_channels = int(final_block_channels * width_scale)

    net = MobileNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def mobilenetv2_w1(**kwargs):
    """
    1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w1", width_scale=1.0, **kwargs)
def mobilenetv2_w3d4(**kwargs):
    """
    0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w3d4", width_scale=0.75, **kwargs)
def mobilenetv2_wd2(**kwargs):
    """
    0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd2", width_scale=0.5, **kwargs)
def mobilenetv2_wd4(**kwargs):
    """
    0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights for the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd4", width_scale=0.25, **kwargs)
def _test():
    """Smoke-test: build each model, check parameter counts and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_weights = {
        mobilenetv2_w1: 3504960,
        mobilenetv2_w3d4: 2627592,
        mobilenetv2_wd2: 1964736,
        mobilenetv2_wd4: 1516392,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained, data_format=data_format)

        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 12,232 | 30.939948 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/squeezenet.py | """
SqueezeNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
import tensorflow as tf
from .common import conv2d, maxpool2d, is_channels_first, get_channel_axis, flatten
def fire_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              padding,
              data_format,
              name="fire_conv"):
    """
    SqueezeNet specific convolution block: a biased convolution followed by a ReLU activation.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'fire_conv'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    conv_out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv")
    return tf.nn.relu(conv_out, name=name + "/activ")
def fire_unit(x,
              in_channels,
              squeeze_channels,
              expand1x1_channels,
              expand3x3_channels,
              residual,
              data_format,
              name="fire_unit"):
    """
    SqueezeNet 'Fire' unit: a 1x1 squeeze convolution followed by parallel 1x1 and 3x3 expand
    branches whose outputs are concatenated along the channel axis, optionally with an identity
    shortcut added.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    squeeze_channels : int
        Number of output channels for squeeze convolution blocks.
    expand1x1_channels : int
        Number of output channels for expand 1x1 convolution blocks.
    expand3x3_channels : int
        Number of output channels for expand 3x3 convolution blocks.
    residual : bool
        Whether use residual connection.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'fire_unit'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    identity = x if residual else None
    squeezed = fire_conv(
        x=x,
        in_channels=in_channels,
        out_channels=squeeze_channels,
        kernel_size=1,
        padding=0,
        data_format=data_format,
        name=name + "/squeeze")
    branch1 = fire_conv(
        x=squeezed,
        in_channels=squeeze_channels,
        out_channels=expand1x1_channels,
        kernel_size=1,
        padding=0,
        data_format=data_format,
        name=name + "/expand1x1")
    branch3 = fire_conv(
        x=squeezed,
        in_channels=squeeze_channels,
        out_channels=expand3x3_channels,
        kernel_size=3,
        padding=1,
        data_format=data_format,
        name=name + "/expand3x3")
    out = tf.concat([branch1, branch3], axis=get_channel_axis(data_format), name=name + "/concat")
    if identity is not None:
        out = out + identity
    return out
def squeeze_init_block(x,
                       in_channels,
                       out_channels,
                       kernel_size,
                       data_format,
                       name="squeeze_init_block"):
    """
    SqueezeNet specific initial block: a stride-2 biased convolution followed by ReLU.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'squeeze_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 stem convolution halves the spatial resolution.
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=2,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv")
    x = tf.nn.relu(x, name=name + "/activ")
    return x
class SqueezeNet(object):
    """
    SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    residuals : list of list of int, or None
        Per-unit flags (1/0) enabling residual connections; None disables them everywhere.
    init_block_kernel_size : int or tuple/list of 2 int
        The dimensions of the convolution window for the initial unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.residuals = residuals
        self.init_block_kernel_size = init_block_kernel_size
        self.init_block_channels = init_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = squeeze_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            kernel_size=self.init_block_kernel_size,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            # Every stage starts with a stride-2 max-pool downsampling.
            x = maxpool2d(
                x=x,
                pool_size=3,
                strides=2,
                ceil_mode=True,
                data_format=self.data_format,
                name="features/pool{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Fire-module split: 1/2 of the output per expand branch, 1/8 for the squeeze.
                expand_channels = out_channels // 2
                squeeze_channels = out_channels // 8
                x = fire_unit(
                    x=x,
                    in_channels=in_channels,
                    squeeze_channels=squeeze_channels,
                    expand1x1_channels=expand_channels,
                    expand3x3_channels=expand_channels,
                    residual=((self.residuals is not None) and (self.residuals[i][j] == 1)),
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Dropout is only active when `training` is truthy.
        x = tf.keras.layers.Dropout(
            rate=0.5,
            name="features/dropout")(
            inputs=x,
            training=training)
        # Classifier is fully convolutional: 1x1 conv to `classes` maps, then global pooling.
        x = conv2d(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            kernel_size=1,
            data_format=self.data_format,
            name="output/final_conv")
        x = tf.nn.relu(x, name="output/final_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=13,
            strides=1,
            data_format=self.data_format,
            name="output/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_squeezenet(version,
                   residual=False,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create SqueezeNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('1.0' or '1.1').
    residual : bool, default False
        Whether to use residual connections.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-version architecture table: (channels, residual flags, stem kernel, stem width).
    configs = {
        '1.0': ([[128, 128, 256], [256, 384, 384, 512], [512]],
                [[0, 1, 0], [1, 0, 1, 0], [1]],
                7,
                96),
        '1.1': ([[128, 128], [256, 256], [384, 384, 512, 512]],
                [[0, 1], [0, 1], [0, 1, 0, 1]],
                3,
                64),
    }
    if version not in configs:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    channels, residuals, init_block_kernel_size, init_block_channels = configs[version]
    if not residual:
        residuals = None
    net = SqueezeNet(
        channels=channels,
        residuals=residuals,
        init_block_kernel_size=init_block_kernel_size,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def squeezenet_v1_0(**kwargs):
    """
    Original ('vanilla') SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(model_name="squeezenet_v1_0", version="1.0", residual=False, **kwargs)
def squeezenet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 model (reduced compute variant) from 'SqueezeNet: AlexNet-level accuracy with
    50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(model_name="squeezenet_v1_1", version="1.1", residual=False, **kwargs)
def squeezeresnet_v1_0(**kwargs):
    """
    SqueezeNet v1.0 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_0", version="1.0", residual=True, **kwargs)
def squeezeresnet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_1", version="1.1", residual=True, **kwargs)
def _test():
    """
    Smoke test: build every SqueezeNet variant, check its trainable parameter count against a
    known-good value, and run one forward pass on a zero image to verify the output shape.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        squeezenet_v1_0,
        squeezenet_v1_1,
        squeezeresnet_v1_0,
        squeezeresnet_v1_1,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1-style graph mode: define a placeholder, build the graph, then run a session.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Residual variants add no weights, so counts match their plain counterparts.
        assert (model != squeezenet_v1_0 or weight_count == 1248424)
        assert (model != squeezenet_v1_1 or weight_count == 1235496)
        assert (model != squeezeresnet_v1_0 or weight_count == 1248424)
        assert (model != squeezeresnet_v1_1 or weight_count == 1235496)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so each model builds in isolation.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 14,788 | 29.810417 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/vgg.py | """
VGG for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
import tensorflow as tf
from .common import conv3x3_block, maxpool2d, is_channels_first, flatten
def vgg_dense(x,
              in_channels,
              out_channels,
              training,
              name="vgg_dense"):
    """
    VGG specific dense block: fully-connected layer, ReLU, then 50% dropout.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'vgg_dense'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (in_channels > 0)
    fc = tf.keras.layers.Dense(
        units=out_channels,
        name=name + "/fc")
    drop = tf.keras.layers.Dropout(
        rate=0.5,
        name=name + "/dropout")
    activated = tf.nn.relu(fc(x), name=name + "/activ")
    return drop(inputs=activated, training=training)
def vgg_output_block(x,
                     in_channels,
                     classes,
                     training,
                     name="vgg_output_block"):
    """
    VGG specific output block: two 4096-wide dense blocks followed by the final classifier layer.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'vgg_output_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = 4096
    y = x
    # Two hidden dense blocks: fc1 maps in_channels -> 4096, fc2 maps 4096 -> 4096.
    for idx, fan_in in enumerate((in_channels, mid_channels), start=1):
        y = vgg_dense(
            x=y,
            in_channels=fan_in,
            out_channels=mid_channels,
            training=training,
            name="{}/fc{}".format(name, idx))
    return tf.keras.layers.Dense(
        units=classes,
        name=name + "/fc3")(y)
class VGG(object):
    """
    VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 use_bias=True,
                 use_bn=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.use_bias = use_bias
        self.use_bn = use_bn
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                x = conv3x3_block(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    use_bias=self.use_bias,
                    use_bn=self.use_bn,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            # Each stage ends with a stride-2 max-pool downsampling.
            x = maxpool2d(
                x=x,
                pool_size=2,
                strides=2,
                padding=0,
                data_format=self.data_format,
                name="features/stage{}/pool".format(i + 1))
        # Assumes a 224x224 input: five stride-2 pools leave a 7x7 feature map — TODO confirm
        # for other in_size values.
        in_channels = in_channels * 7 * 7
        # x = tf.reshape(x, [-1, in_channels])
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = vgg_output_block(
            x=x,
            in_channels=in_channels,
            classes=self.classes,
            training=training,
            name="output")
        return x
def get_vgg(blocks,
            use_bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create VGG model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Units per stage for every supported depth.
    layers_config = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in layers_config:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
    layers = layers_config[blocks]
    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = VGG(
        channels=channels,
        use_bias=use_bias,
        use_bn=use_bn,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def vgg11(**kwargs):
    """
    VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, model_name="vgg11", **kwargs)
def vgg13(**kwargs):
    """
    VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, model_name="vgg13", **kwargs)
def vgg16(**kwargs):
    """
    VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, model_name="vgg16", **kwargs)
def vgg19(**kwargs):
    """
    VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, model_name="vgg19", **kwargs)
def bn_vgg11(**kwargs):
    """
    VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs)
def bn_vgg13(**kwargs):
    """
    VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs)
def bn_vgg16(**kwargs):
    """
    VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs)
def bn_vgg19(**kwargs):
    """
    VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs)
def bn_vgg11b(**kwargs):
    """
    VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs)
def bn_vgg13b(**kwargs):
    """
    VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs)
def bn_vgg16b(**kwargs):
    """
    VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs)
def bn_vgg19b(**kwargs):
    """
    VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs)
def _test():
    """
    Smoke test: build every VGG variant, check its trainable parameter count against a known-good
    value, and run one forward pass on a zero image to verify the output shape.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        vgg11,
        vgg13,
        vgg16,
        vgg19,
        bn_vgg11,
        bn_vgg13,
        bn_vgg16,
        bn_vgg19,
        bn_vgg11b,
        bn_vgg13b,
        bn_vgg16b,
        bn_vgg19b,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1-style graph mode: define a placeholder, build the graph, then run a session.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Fixed parameter counts guard against accidental architecture changes.
        assert (model != vgg11 or weight_count == 132863336)
        assert (model != vgg13 or weight_count == 133047848)
        assert (model != vgg16 or weight_count == 138357544)
        assert (model != vgg19 or weight_count == 143667240)
        assert (model != bn_vgg11 or weight_count == 132866088)
        assert (model != bn_vgg13 or weight_count == 133050792)
        assert (model != bn_vgg16 or weight_count == 138361768)
        assert (model != bn_vgg19 or weight_count == 143672744)
        assert (model != bn_vgg11b or weight_count == 132868840)
        assert (model != bn_vgg13b or weight_count == 133053736)
        assert (model != bn_vgg16b or weight_count == 138365992)
        assert (model != bn_vgg19b or weight_count == 143678248)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so each model builds in isolation.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 15,566 | 30.576065 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mnasnet.py | """
MnasNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
import tensorflow as tf
from .common import is_channels_first, flatten, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
se_block, round_channels
def dws_exp_se_res_unit(x,
                        in_channels,
                        out_channels,
                        strides=1,
                        use_kernel3=True,
                        exp_factor=1,
                        se_factor=0,
                        use_skip=True,
                        activation="relu",
                        training=False,
                        data_format="channels_last",
                        name="dws_exp_se_res_unit"):
    """
    Depthwise separable expanded residual unit with SE-block. Here it is used as the MnasNet unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution layer.
    use_kernel3 : bool, default True
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int, default 1
        Expansion factor for each unit.
    se_factor : int, default 0
        SE reduction factor for each unit (0 disables the SE block).
    use_skip : bool, default True
        Whether to use skip connection.
    activation : str, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dws_exp_se_res_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (exp_factor >= 1)
    # Identity shortcut only when the tensor shape is preserved and the caller requests it.
    residual = (in_channels == out_channels) and (strides == 1) and use_skip
    use_exp_conv = exp_factor > 1
    use_se = se_factor > 0
    mid_channels = exp_factor * in_channels
    dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block
    if residual:
        identity = x
    # Optional 1x1 expansion conv (skipped when exp_factor == 1, i.e. no widening).
    if use_exp_conv:
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/exp_conv")
    x = dwconv_block_fn(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name + "/dw_conv")
    if use_se:
        x = se_block(
            x=x,
            channels=mid_channels,
            reduction=(exp_factor * se_factor),
            approx_sigmoid=False,
            round_mid=False,
            activation=activation,
            data_format=data_format,
            name=name + "/se")
    # Linear (no activation) pointwise projection back to out_channels.
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/pw_conv")
    if residual:
        x = x + identity
    return x
def mnas_init_block(x,
                    in_channels,
                    out_channels,
                    mid_channels,
                    use_skip,
                    training,
                    data_format,
                    name="mnas_init_block"):
    """
    MnasNet specific initial block: a stride-2 stem convolution followed by one depthwise
    separable unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 stem convolution halves the spatial resolution.
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = dws_exp_se_res_unit(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        use_skip=use_skip,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x
def mnas_final_block(x,
                     in_channels,
                     out_channels,
                     mid_channels,
                     use_skip,
                     training,
                     data_format,
                     name="mnas_final_block"):
    """
    MnasNet specific final block: one expanded (factor 6) depthwise separable unit followed by a
    1x1 convolution.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_final_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = dws_exp_se_res_unit(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        exp_factor=6,
        use_skip=use_skip,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x
class MnasNet(object):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 se_factors,
                 init_block_use_skip,
                 final_block_use_skip,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MnasNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.kernels3 = kernels3
        self.exp_factors = exp_factors
        self.se_factors = se_factors
        self.init_block_use_skip = init_block_use_skip
        self.final_block_use_skip = final_block_use_skip
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = mnas_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels[1],
            mid_channels=self.init_block_channels[0],
            use_skip=self.init_block_use_skip,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels[1]
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of each stage downsamples.
                strides = 2 if (j == 0) else 1
                use_kernel3 = self.kernels3[i][j] == 1
                exp_factor = self.exp_factors[i][j]
                se_factor = self.se_factors[i][j]
                x = dws_exp_se_res_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    se_factor=se_factor,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = mnas_final_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels[1],
            mid_channels=self.final_block_channels[0],
            use_skip=self.final_block_use_skip,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        # in_channels = self.final_block_channels[1]
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_mnasnet(version,
                width_scale,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".keras", "models"),
                **kwargs):
    """
    Create MnasNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MnasNet ('b1', 'a1' or 'small').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns:
    -------
    MnasNet
        Model object (with an attached pretrained state dict when requested).

    Raises:
    ------
    ValueError
        If `version` is unsupported, or `pretrained` is set without `model_name`.
    """
    if version == "b1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
        kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
        exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
        se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
        init_block_use_skip = False
        final_block_use_skip = False
    elif version == "a1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
        init_block_use_skip = False
        final_block_use_skip = True
    elif version == "small":
        init_block_channels = [8, 8]
        final_block_channels = [144, 1280]
        channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
        init_block_use_skip = True
        final_block_use_skip = True
    else:
        raise ValueError("Unsupported MnasNet version {}".format(version))

    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        # Bug fix: `init_block_channels` is a list of two channel counts, so each
        # entry must be scaled individually (multiplying the list by a float used
        # to raise a TypeError here).
        init_block_channels = [round_channels(c * width_scale) for c in init_block_channels]

    net = MnasNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        se_factors=se_factors,
        init_block_use_skip=init_block_use_skip,
        final_block_use_skip=final_block_use_skip,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def mnasnet_b1(**kwargs):
    """
    MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic factory.
    net = get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs)
    return net
def mnasnet_a1(**kwargs):
    """
    MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic factory.
    net = get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs)
    return net
def mnasnet_small(**kwargs):
    """
    MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Thin convenience wrapper around the generic factory.
    net = get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs)
    return net
def _test():
    """Smoke-test each MnasNet variant: build the graph, check the parameter count,
    and run one zero-filled batch through a TF1-compat session."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Pair each constructor with its expected trainable-parameter count.
    cases = [
        (mnasnet_b1, 4383312),
        (mnasnet_a1, 3887038),
        (mnasnet_small, 2030264),
    ]

    for model_fn, expected_params in cases:
        net = model_fn(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.compat.v1.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == expected_params)

        with tf.compat.v1.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.compat.v1.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.compat.v1.reset_default_graph()
if __name__ == "__main__":
_test()
| 17,642 | 32.478178 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/seresnet.py | """
SE-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b']
import os
import tensorflow as tf
from .common import conv1x1_block, se_block, is_channels_first, flatten
from .resnet import res_block, res_bottleneck_block, res_init_block
def seres_unit(x,
               in_channels,
               out_channels,
               strides,
               bottleneck,
               conv1_stride,
               training,
               data_format,
               name="seres_unit"):
    """
    SE-ResNet unit: a residual body, channel recalibration via an SE block, and an
    identity shortcut added before the final ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'seres_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # The shortcut needs a 1x1 projection whenever the spatial size or the channel
    # count changes; otherwise the raw input is reused.
    needs_projection = (strides != 1) or (in_channels != out_channels)
    if not needs_projection:
        identity = x
    else:
        identity = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")

    if not bottleneck:
        x = res_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        x = res_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")

    # Squeeze-and-excitation recalibration of the body output.
    x = se_block(
        x=x,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")

    x = x + identity
    return tf.nn.relu(x, name=name + "/activ")
class SEResNet(object):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    The constructor only records the architecture description; the graph is built
    when the instance is called on an input tensor.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        data_format = self.data_format
        x = res_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=data_format,
            name="features/init_block")
        channel_count = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            for unit_idx, unit_channels in enumerate(stage_channels):
                # The first unit of every stage except the first one downsamples.
                unit_strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                x = seres_unit(
                    x=x,
                    in_channels=channel_count,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                channel_count = unit_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_seresnet(blocks,
                 bottleneck=None,
                 conv1_stride=True,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        # Deep variants default to the bottleneck design.
        bottleneck = (blocks >= 50)

    # Per-stage unit counts that do not depend on the bottleneck flag.
    depth_table = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        unit_counts = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        unit_counts = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        unit_counts = [3, 3, 3, 3]
    elif blocks in depth_table:
        unit_counts = depth_table[blocks]
    else:
        raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))

    # Sanity check: the depth implied by the unit counts must match `blocks`.
    if bottleneck:
        assert (sum(unit_counts) * 3 + 2 == blocks)
    else:
        assert (sum(unit_counts) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, unit_counts)]

    net = SEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def seresnet10(**kwargs):
    """
    SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=10, model_name="seresnet10", **kwargs)
    return net
def seresnet12(**kwargs):
    """
    SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=12, model_name="seresnet12", **kwargs)
    return net
def seresnet14(**kwargs):
    """
    SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=14, model_name="seresnet14", **kwargs)
    return net
def seresnet16(**kwargs):
    """
    SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=16, model_name="seresnet16", **kwargs)
    return net
def seresnet18(**kwargs):
    """
    SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=18, model_name="seresnet18", **kwargs)
    return net
def seresnet26(**kwargs):
    """
    SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs)
    return net
def seresnetbc26b(**kwargs):
    """
    SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs)
    return net
def seresnet34(**kwargs):
    """
    SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=34, model_name="seresnet34", **kwargs)
    return net
def seresnetbc38b(**kwargs):
    """
    SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs)
    return net
def seresnet50(**kwargs):
    """
    SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=50, model_name="seresnet50", **kwargs)
    return net
def seresnet50b(**kwargs):
    """
    SE-ResNet-50 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs)
    return net
def seresnet101(**kwargs):
    """
    SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=101, model_name="seresnet101", **kwargs)
    return net
def seresnet101b(**kwargs):
    """
    SE-ResNet-101 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs)
    return net
def seresnet152(**kwargs):
    """
    SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=152, model_name="seresnet152", **kwargs)
    return net
def seresnet152b(**kwargs):
    """
    SE-ResNet-152 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs)
    return net
def seresnet200(**kwargs):
    """
    SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=200, model_name="seresnet200", **kwargs)
    return net
def seresnet200b(**kwargs):
    """
    SE-ResNet-200 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs)
    return net
def _test():
    """Smoke-test every SE-ResNet variant: build the graph, check the trainable
    parameter count, and run one zero-filled batch through a TF1 session."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Pair each constructor with its expected trainable-parameter count.
    cases = [
        (seresnet10, 5463332),
        (seresnet12, 5537896),
        (seresnet14, 5835504),
        (seresnet16, 7024640),
        (seresnet18, 11778592),
        (seresnet26, 18093852),
        (seresnetbc26b, 17395976),
        (seresnet34, 21958868),
        (seresnetbc38b, 24026616),
        (seresnet50, 28088024),
        (seresnet50b, 28088024),
        (seresnet101, 49326872),
        (seresnet101b, 49326872),
        (seresnet152, 66821848),
        (seresnet152b, 66821848),
        (seresnet200, 71835864),
        (seresnet200b, 71835864),
    ]

    for model_fn, expected_params in cases:
        net = model_fn(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == expected_params)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
if __name__ == "__main__":
_test()
| 21,991 | 30.194326 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/densenet.py | """
DenseNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
import os
import tensorflow as tf
from .common import pre_conv1x1_block, pre_conv3x3_block, is_channels_first, get_channel_axis, flatten
from .preresnet import preres_init_block, preres_activation
def dense_unit(x,
               in_channels,
               out_channels,
               dropout_rate,
               training,
               data_format,
               name="dense_unit"):
    """
    DenseNet unit: a pre-activated 1x1 bottleneck convolution followed by a
    pre-activated 3x3 convolution; the produced channel increment is concatenated
    with the unit input along the channel axis.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dense_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    bn_size = 4
    # The unit only computes the channel increment (growth); the remaining
    # `in_channels` come straight from the identity via the concat below.
    inc_channels = out_channels - in_channels
    mid_channels = inc_channels * bn_size

    identity = x
    x = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = pre_conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=inc_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # Fix: the layer name was missing the '/' separator ("...dropout"),
        # inconsistent with every other sub-layer name in this file. Dropout has
        # no variables, so the rename cannot affect pretrained-weight loading.
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    x = tf.concat([identity, x], axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def transition_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="transition_block"):
    """
    DenseNet transition block: a pre-activated 1x1 convolution that compresses the
    channel count followed by 2x2 average pooling. Triggered only in the first unit
    of each stage.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'transition_block'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    # Halve the spatial resolution between stages.
    pool = tf.keras.layers.AveragePooling2D(
        pool_size=2,
        strides=2,
        data_format=data_format,
        name=name + "/pool")
    return pool(x)
class DenseNet(object):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    The constructor only records the architecture description; the graph is built
    when the instance is called on an input tensor.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.dropout_rate = dropout_rate
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        data_format = self.data_format
        x = preres_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=data_format,
            name="features/init_block")
        channel_count = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            # Every stage but the first opens with a transition block that halves
            # both the channel count and the spatial resolution.
            if stage_idx != 0:
                x = transition_block(
                    x=x,
                    in_channels=channel_count,
                    out_channels=(channel_count // 2),
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/trans{}".format(stage_idx + 1, stage_idx + 1))
                channel_count = channel_count // 2
            for unit_idx, unit_channels in enumerate(stage_channels):
                x = dense_unit(
                    x=x,
                    in_channels=channel_count,
                    out_channels=unit_channels,
                    dropout_rate=self.dropout_rate,
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                channel_count = unit_channels
        x = preres_activation(
            x=x,
            training=training,
            data_format=data_format,
            name="features/post_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_densenet(blocks,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is set without `model_name`.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))

    # Per-unit output channel counts. Each stage starts from half the channel
    # count left by the previous stage (the transition block halves it), and every
    # dense unit then adds `growth_rate` channels. This explicit loop replaces an
    # equivalent but hard-to-read nested functools.reduce expression.
    channels = []
    prev_channels = init_block_channels * 2
    for num_units in layers:
        stage_channels = []
        unit_channels = prev_channels // 2
        for _ in range(num_units):
            unit_channels += growth_rate
            stage_channels.append(unit_channels)
        channels.append(stage_channels)
        prev_channels = unit_channels

    net = DenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def densenet121(**kwargs):
    """
    DenseNet-121 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Thin wrapper: fix the 121-layer configuration and delegate.
    return get_densenet(
        blocks=121,
        model_name="densenet121",
        **kwargs)
def densenet161(**kwargs):
    """
    DenseNet-161 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Thin wrapper: fix the 161-layer configuration and delegate.
    return get_densenet(
        blocks=161,
        model_name="densenet161",
        **kwargs)
def densenet169(**kwargs):
    """
    DenseNet-169 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Thin wrapper: fix the 169-layer configuration and delegate.
    return get_densenet(
        blocks=169,
        model_name="densenet169",
        **kwargs)
def densenet201(**kwargs):
    """
    DenseNet-201 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Thin wrapper: fix the 201-layer configuration and delegate.
    return get_densenet(
        blocks=201,
        model_name="densenet201",
        **kwargs)
def _test():
    """Smoke-test: build every DenseNet variant, check trainable-parameter counts and output shape."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        densenet121,
        densenet161,
        densenet169,
        densenet201,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # NOTE(review): TF1 graph-mode API (tf.placeholder / tf.Session);
        # requires TF 1.x (or compat.v1 with v2 behavior disabled) — confirm against the project's TF pin.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Expected parameter counts pin the architecture definitions; one assert per model,
        # vacuously true for the others.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != densenet121 or weight_count == 7978856)
        assert (model != densenet161 or weight_count == 28681000)
        assert (model != densenet169 or weight_count == 14149480)
        assert (model != densenet201 or weight_count == 20013928)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # One dummy forward pass to verify the graph runs end to end.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the global default graph so per-model variable counts stay independent.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 13,065 | 29.816038 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/seresnext.py | """
SE-ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
import tensorflow as tf
from .common import conv1x1_block, se_block, is_channels_first, flatten
from .resnet import res_init_block
from .resnext import resnext_bottleneck
def seresnext_unit(x,
                   in_channels,
                   out_channels,
                   strides,
                   cardinality,
                   bottleneck_width,
                   training,
                   data_format,
                   name="seresnext_unit"):
    """
    SE-ResNeXt unit: a grouped ResNeXt bottleneck whose output is recalibrated
    by a squeeze-and-excitation block before the residual addition.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'seresnext_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Project the shortcut when the spatial size or channel count changes.
    needs_projection = (in_channels != out_channels) or (strides != 1)
    if needs_projection:
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        shortcut = x
    body = resnext_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        training=training,
        data_format=data_format,
        name=name + "/body")
    # Channel-wise recalibration of the residual branch.
    body = se_block(
        x=body,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    return tf.nn.relu(body + shortcut, name=name + "/activ")
class SEResNeXt(object):
    """
    SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNeXt, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only configuration is stored here; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = res_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first one.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = seresnext_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # pool_size=7 presumes a 7x7 final feature map (224x224 input) — TODO confirm for other sizes.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_seresnext(blocks,
                  cardinality,
                  bottleneck_width,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create SE-ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Units per stage for each supported depth.
    layers_per_depth = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_per_depth:
        raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_per_depth[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    # Expand to one channel count per unit.
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    net = SEResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def seresnext50_32x4d(**kwargs):
    """
    SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_seresnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="seresnext50_32x4d",
        **kwargs)
def seresnext101_32x4d(**kwargs):
    """
    SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_seresnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="seresnext101_32x4d",
        **kwargs)
def seresnext101_64x4d(**kwargs):
    """
    SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_seresnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="seresnext101_64x4d",
        **kwargs)
def _test():
    """Smoke-test: build every SE-ResNeXt variant, check trainable-parameter counts and output shape."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        seresnext50_32x4d,
        seresnext101_32x4d,
        seresnext101_64x4d,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # NOTE(review): TF1 graph-mode API (tf.placeholder / tf.Session).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Expected parameter counts pin the architecture definitions.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != seresnext50_32x4d or weight_count == 27559896)
        assert (model != seresnext101_32x4d or weight_count == 48955416)
        assert (model != seresnext101_64x4d or weight_count == 88232984)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # One dummy forward pass to verify the graph runs end to end.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Reset the global default graph between models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 10,990 | 30.048023 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenetv3.py | """
MobileNetV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
__all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4',
'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2',
'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4']
import os
import tensorflow as tf
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
se_block, hswish, is_channels_first, flatten
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def mobilenetv3_unit(x,
                     in_channels,
                     out_channels,
                     exp_channels,
                     strides,
                     use_kernel3,
                     activation,
                     use_se,
                     training,
                     data_format,
                     name="mobilenetv3_unit"):
    """
    MobileNetV3 unit (inverted residual block): optional pointwise expansion,
    depthwise conv, optional SE recalibration, pointwise projection.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    exp_channels : int
        Number of middle (expanded) channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    activation : str
        Activation function or name of activation function.
    use_se : bool
        Whether to use SE-module.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (exp_channels >= out_channels)
    # Residual shortcut only when input/output shapes match exactly.
    residual = (in_channels == out_channels) and (strides == 1)
    use_exp_conv = exp_channels != out_channels
    mid_channels = exp_channels
    if residual:
        identity = x
    if use_exp_conv:
        # Pointwise expansion.
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/exp_conv")
    if use_kernel3:
        x = dwconv3x3_block(
            x=x,
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/conv1")
    else:
        # Bug fix: `training` and `data_format` were not forwarded here
        # (unlike the 3x3 branch), so BatchNorm inside this block always ran
        # in inference mode during training and channels_first inputs were
        # handled with the default layout.
        x = dwconv5x5_block(
            x=x,
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/conv1")
    if use_se:
        x = se_block(
            x=x,
            channels=mid_channels,
            reduction=4,
            approx_sigmoid=True,
            round_mid=True,
            data_format=data_format,
            name=name + "/se")
    # Linear (no activation) pointwise projection.
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    if residual:
        x = x + identity
    return x
def mobilenetv3_final_block(x,
                            in_channels,
                            out_channels,
                            use_se,
                            training,
                            data_format,
                            name="mobilenetv3_final_block"):
    """
    Final feature-extractor block of MobileNetV3: 1x1 conv with H-swish,
    optionally followed by an SE recalibration module.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_final_block'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        activation="hswish",
        training=training,
        data_format=data_format,
        name=name + "/conv")
    if not use_se:
        return out
    return se_block(
        x=out,
        channels=out_channels,
        reduction=4,
        approx_sigmoid=True,
        round_mid=True,
        data_format=data_format,
        name=name + "/se")
def mobilenetv3_classifier(x,
                           in_channels,
                           out_channels,
                           mid_channels,
                           dropout_rate,
                           training,
                           data_format,
                           name="mobilenetv3_final_block"):
    """
    MobileNetV3 classifier: 1x1 conv -> H-swish -> (dropout) -> 1x1 conv.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_final_block'
        Unit name. NOTE(review): the default looks copy-pasted from
        `mobilenetv3_final_block`; it is kept unchanged for backward
        compatibility (the in-file caller always passes an explicit name).
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/conv1")
    x = hswish(x, name=name + "/hswish")
    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # Fixed op name: was `name + "dropout"` (missing '/'), inconsistent
        # with every other sub-op name in this file. Dropout holds no
        # variables, so pretrained weight loading is unaffected.
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    # Final 1x1 conv acts as the fully-connected classifier (bias enabled).
    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv2")
    return x
class MobileNetV3(object):
    """
    MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    exp_channels : list of list of int
        Number of middle (expanded) channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    use_relu : list of list of int/bool
        Using ReLU activation flag for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    final_use_se : bool
        Whether to use SE-module in the final block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only configuration is stored here; the graph is built lazily in __call__.
        # The per-unit tables (channels/exp_channels/kernels3/use_relu/use_se)
        # must all share the same nested shape.
        self.channels = channels
        self.exp_channels = exp_channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.classifier_mid_channels = classifier_mid_channels
        self.kernels3 = kernels3
        self.use_relu = use_relu
        self.use_se = use_se
        self.first_stride = first_stride
        self.final_use_se = final_use_se
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Stem: strided 3x3 conv with H-swish.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="hswish",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = self.exp_channels[i][j]
                # Downsample at the first unit of each stage; the first stage
                # downsamples only when first_stride is set (small variant).
                strides = 2 if (j == 0) and ((i != 0) or self.first_stride) else 1
                # Tables store 0/1 flags; translate to booleans/activation names.
                use_kernel3 = self.kernels3[i][j] == 1
                activation = "relu" if self.use_relu[i][j] == 1 else "hswish"
                use_se_flag = self.use_se[i][j] == 1
                x = mobilenetv3_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    exp_channels=exp_channels_ij,
                    use_kernel3=use_kernel3,
                    strides=strides,
                    activation=activation,
                    use_se=use_se_flag,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = mobilenetv3_final_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            use_se=self.final_use_se,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        in_channels = self.final_block_channels
        # pool_size=7 presumes a 7x7 final feature map (224x224 input) — TODO confirm for other sizes.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # The classifier is fully convolutional (1x1 convs); flatten afterwards.
        x = mobilenetv3_classifier(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            mid_channels=self.classifier_mid_channels,
            dropout_rate=0.2,
            training=training,
            data_format=self.data_format,
            name="output")
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_mobilenetv3(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV3 model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MobileNetV3 ('small' or 'large').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    MobileNetV3
        Functor for model graph creation with extra fields.
    """
    # Per-variant architecture tables (one inner list per stage, one entry per unit).
    if version == "small":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
        exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]]
        first_stride = True
        final_block_channels = 576
    elif version == "large":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]]
        kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]]
        first_stride = False
        final_block_channels = 960
    else:
        raise ValueError("Unsupported MobileNetV3 version {}".format(version))
    final_use_se = False
    classifier_mid_channels = 1280
    if width_scale != 1.0:
        # Scale every stage/unit width; classifier_mid_channels is deliberately left unscaled.
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
        if width_scale > 1.0:
            # The final feature block only widens when scaling up, never narrows.
            final_block_channels = round_channels(final_block_channels * width_scale)
    net = MobileNetV3(
        channels=channels,
        exp_channels=exp_channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        use_relu=use_relu,
        use_se=use_se,
        first_stride=first_stride,
        final_use_se=final_use_se,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        # NOTE(review): unlike get_seresnext/get_densenet in this repo, this
        # path uses download_model and does not set net.state_dict — verify
        # against consumers that read net.state_dict.
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def mobilenetv3_small_w7d20(**kwargs):
    """
    MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.35,
        model_name="mobilenetv3_small_w7d20",
        **kwargs)
def mobilenetv3_small_wd2(**kwargs):
    """
    MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.5,
        model_name="mobilenetv3_small_wd2",
        **kwargs)
def mobilenetv3_small_w3d4(**kwargs):
    """
    MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.75,
        model_name="mobilenetv3_small_w3d4",
        **kwargs)
def mobilenetv3_small_w1(**kwargs):
    """
    MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=1.0,
        model_name="mobilenetv3_small_w1",
        **kwargs)
def mobilenetv3_small_w5d4(**kwargs):
    """
    MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=1.25,
        model_name="mobilenetv3_small_w5d4",
        **kwargs)
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Bug fix: model_name previously said "mobilenetv3_small_w7d20" (copy-paste
    # from the small-variant factory), which would fetch the *small* model's
    # pretrained weights for the large architecture. The docstring also said
    # "Small"; both corrected.
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def mobilenetv3_large_wd2(**kwargs):
    """
    MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="large",
        width_scale=0.5,
        model_name="mobilenetv3_large_wd2",
        **kwargs)
def mobilenetv3_large_w3d4(**kwargs):
    """
    MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="large",
        width_scale=0.75,
        model_name="mobilenetv3_large_w3d4",
        **kwargs)
def mobilenetv3_large_w1(**kwargs):
    """
    MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="large",
        width_scale=1.0,
        model_name="mobilenetv3_large_w1",
        **kwargs)
def mobilenetv3_large_w5d4(**kwargs):
    """
    MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenetv3(
        version="large",
        width_scale=1.25,
        model_name="mobilenetv3_large_w5d4",
        **kwargs)
def _test():
    """Smoke-test: build every MobileNetV3 variant, check trainable-parameter counts and output shape."""
    import numpy as np
    # import logging
    # logging.getLogger("tensorflow").disabled = True
    data_format = "channels_last"
    pretrained = False
    models = [
        mobilenetv3_small_w7d20,
        mobilenetv3_small_wd2,
        mobilenetv3_small_w3d4,
        mobilenetv3_small_w1,
        mobilenetv3_small_w5d4,
        mobilenetv3_large_w7d20,
        mobilenetv3_large_wd2,
        mobilenetv3_large_w3d4,
        mobilenetv3_large_w1,
        mobilenetv3_large_w5d4,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1 graph-mode API accessed via tf.compat.v1.
        x = tf.compat.v1.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Expected parameter counts pin the architecture definitions.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != mobilenetv3_small_w7d20 or weight_count == 2159600)
        assert (model != mobilenetv3_small_wd2 or weight_count == 2288976)
        assert (model != mobilenetv3_small_w3d4 or weight_count == 2581312)
        assert (model != mobilenetv3_small_w1 or weight_count == 2945288)
        assert (model != mobilenetv3_small_w5d4 or weight_count == 3643632)
        assert (model != mobilenetv3_large_w7d20 or weight_count == 2943080)
        assert (model != mobilenetv3_large_wd2 or weight_count == 3334896)
        assert (model != mobilenetv3_large_w3d4 or weight_count == 4263496)
        assert (model != mobilenetv3_large_w1 or weight_count == 5481752)
        assert (model != mobilenetv3_large_w5d4 or weight_count == 7459144)
        with tf.compat.v1.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                # NOTE(review): get_mobilenetv3 uses download_model and never
                # sets net.state_dict, so this branch may fail — verify.
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.compat.v1.global_variables_initializer())
            # One dummy forward pass to verify the graph runs end to end.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Reset the global default graph between models.
        tf.compat.v1.reset_default_graph()
if __name__ == "__main__":
    _test()
| 22,437 | 33.048558 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/sepreresnet.py | """
SE-PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b']
import os
import tensorflow as tf
from .common import conv1x1, se_block, is_channels_first, flatten
from .preresnet import preres_block, preres_bottleneck_block, preres_init_block, preres_activation
def sepreres_unit(x,
                  in_channels,
                  out_channels,
                  strides,
                  bottleneck,
                  conv1_stride,
                  training,
                  data_format,
                  name="sepreres_unit"):
    """
    SE-PreResNet unit: a pre-activation residual block whose output is
    recalibrated by a squeeze-and-excitation module before the shortcut add.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sepreres_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    shortcut = x
    if bottleneck:
        body, pre_activ = preres_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        body, pre_activ = preres_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")
    # Channel-wise recalibration of the residual branch.
    body = se_block(
        x=body,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    # Pre-activation shortcut: project from the pre-activated features when
    # the spatial size or channel count changes.
    if (in_channels != out_channels) or (strides != 1):
        shortcut = conv1x1(
            x=pre_activ,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name=name + "/identity_conv/conv")
    return body + shortcut
class SEPreResNet(object):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Instances are functors: construct with
    the architecture hyper-parameters, then call on an input tensor to build
    the graph.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit, grouped by stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEPreResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build the SE-PreResNet graph on top of the given input tensor.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor (class logits).
        """
        x = preres_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                downsample = (unit_idx == 0) and (stage_idx != 0)
                x = sepreres_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=(2 if downsample else 1),
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                in_channels = out_channels
        # Pre-activation networks need a final BN+ReLU before pooling.
        x = preres_activation(
            x=x,
            training=training,
            data_format=self.data_format,
            name="features/post_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SE-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (network depth).
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. When None,
        inferred from the depth (bottleneck for 50 layers and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depths whose per-stage unit counts do not depend on `bottleneck`.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths 14/26 differ by block type; 38 is only defined with bottleneck.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: a bottleneck unit holds 3 convs, a simple unit 2,
    # plus 2 layers outside the stages.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        # `None` is falsy, so a single truthiness test covers both cases.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def sepreresnet10(**kwargs):
    """
    Build the SE-PreResNet-10 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=10, model_name="sepreresnet10")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet12(**kwargs):
    """
    Build the SE-PreResNet-12 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=12, model_name="sepreresnet12")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet14(**kwargs):
    """
    Build the SE-PreResNet-14 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=14, model_name="sepreresnet14")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet16(**kwargs):
    """
    Build the SE-PreResNet-16 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=16, model_name="sepreresnet16")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet18(**kwargs):
    """
    Build the SE-PreResNet-18 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=18, model_name="sepreresnet18")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet26(**kwargs):
    """
    Build the SE-PreResNet-26 network (simple blocks) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=26, model_name="sepreresnet26")
    return get_sepreresnet(**config, **kwargs)
def sepreresnetbc26b(**kwargs):
    """
    Build the SE-PreResNet-BC-26b network (bottleneck blocks, stride in the
    second conv) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet34(**kwargs):
    """
    Build the SE-PreResNet-34 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=34, model_name="sepreresnet34")
    return get_sepreresnet(**config, **kwargs)
def sepreresnetbc38b(**kwargs):
    """
    Build the SE-PreResNet-BC-38b network (bottleneck blocks, stride in the
    second conv) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet50(**kwargs):
    """
    Build the SE-PreResNet-50 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=50, model_name="sepreresnet50")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet50b(**kwargs):
    """
    Build the SE-PreResNet-50b network (stride moved to the second conv of the
    bottleneck block) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=50, conv1_stride=False, model_name="sepreresnet50b")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet101(**kwargs):
    """
    Build the SE-PreResNet-101 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=101, model_name="sepreresnet101")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet101b(**kwargs):
    """
    Build the SE-PreResNet-101b network (stride moved to the second conv of the
    bottleneck block) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=101, conv1_stride=False, model_name="sepreresnet101b")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet152(**kwargs):
    """
    Build the SE-PreResNet-152 network from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=152, model_name="sepreresnet152")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet152b(**kwargs):
    """
    Build the SE-PreResNet-152b network (stride moved to the second conv of the
    bottleneck block) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=152, conv1_stride=False, model_name="sepreresnet152b")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet200(**kwargs):
    """
    Build the (experimental) SE-PreResNet-200 network from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=200, model_name="sepreresnet200")
    return get_sepreresnet(**config, **kwargs)
def sepreresnet200b(**kwargs):
    """
    Build the (experimental) SE-PreResNet-200b network (stride moved to the
    second conv of the bottleneck block) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=200, conv1_stride=False, model_name="sepreresnet200b")
    return get_sepreresnet(**config, **kwargs)
def _test():
    """
    Smoke-test all SE-PreResNet variants: build each graph, compare the
    trainable parameter count against a hard-coded reference value, then run
    one dummy batch through a session and check the logits shape.

    NOTE(review): this harness uses the TensorFlow 1.x API (`tf.placeholder`,
    `tf.Session`, `tf.global_variables_initializer`, `tf.reset_default_graph`)
    and depends on the implicit default-graph state, so statement order is
    significant and the code is left byte-identical.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        sepreresnet10,
        sepreresnet12,
        sepreresnet14,
        sepreresnet16,
        sepreresnet18,
        sepreresnet26,
        sepreresnetbc26b,
        sepreresnet34,
        sepreresnetbc38b,
        sepreresnet50,
        sepreresnet50b,
        sepreresnet101,
        sepreresnet101b,
        sepreresnet152,
        sepreresnet152b,
        sepreresnet200,
        sepreresnet200b,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Symbolic input: NCHW or NHWC depending on data_format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count for the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts per variant (vacuously true for others).
        assert (model != sepreresnet10 or weight_count == 5461668)
        assert (model != sepreresnet12 or weight_count == 5536232)
        assert (model != sepreresnet14 or weight_count == 5833840)
        assert (model != sepreresnet16 or weight_count == 7022976)
        assert (model != sepreresnet18 or weight_count == 11776928)
        assert (model != sepreresnet26 or weight_count == 18092188)
        assert (model != sepreresnetbc26b or weight_count == 17388424)
        assert (model != sepreresnet34 or weight_count == 21957204)
        assert (model != sepreresnetbc38b or weight_count == 24019064)
        assert (model != sepreresnet50 or weight_count == 28080472)
        assert (model != sepreresnet50b or weight_count == 28080472)
        assert (model != sepreresnet101 or weight_count == 49319320)
        assert (model != sepreresnet101b or weight_count == 49319320)
        assert (model != sepreresnet152 or weight_count == 66814296)
        assert (model != sepreresnet152b or weight_count == 66814296)
        assert (model != sepreresnet200 or weight_count == 71828312)
        assert (model != sepreresnet200b or weight_count == 71828312)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Forward one zero-filled image and check the logits shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts clean.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 22,299 | 30.766382 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/resnext.py | """
ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original papers: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'resnext_bottleneck']
import os
import math
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
from .resnet import res_init_block
def resnext_bottleneck(x,
                       in_channels,
                       out_channels,
                       strides,
                       cardinality,
                       bottleneck_width,
                       bottleneck_factor=4,
                       training=False,
                       data_format="channels_last",
                       name="resnext_bottleneck"):
    """
    ResNeXt bottleneck block for the residual path of a ResNeXt unit:
    1x1 reduce -> grouped 3x3 -> 1x1 expand (no final activation).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'resnext_bottleneck'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Width of the grouped stage: `cardinality` groups, each
    # floor(mid_channels * bottleneck_width / 64) channels wide.
    mid_channels = out_channels // bottleneck_factor
    per_group_width = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * per_group_width
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=group_width,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=group_width,
        out_channels=group_width,
        strides=strides,
        groups=cardinality,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # No activation here: the enclosing unit applies ReLU after the
    # residual addition.
    y = conv1x1_block(
        x=y,
        in_channels=group_width,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return y
def resnext_unit(x,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 training,
                 data_format,
                 name="resnext_unit"):
    """
    ResNeXt unit: grouped bottleneck body plus a (possibly projected)
    residual connection, followed by ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'resnext_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Project the shortcut only when spatial size or channel count changes.
    needs_projection = (in_channels != out_channels) or (strides != 1)
    if needs_projection:
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        shortcut = x
    y = resnext_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        training=training,
        data_format=data_format,
        name=name + "/body")
    y = y + shortcut
    return tf.nn.relu(y, name=name + "/activ")
class ResNeXt(object):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural
    Networks,' http://arxiv.org/abs/1611.05431. Instances are functors:
    construct with the architecture hyper-parameters, then call on an input
    tensor to build the graph.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit, grouped by stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeXt, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build the ResNeXt graph on top of the given input tensor.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor (class logits).
        """
        x = res_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels):
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                downsample = (unit_idx == 0) and (stage_idx != 0)
                x = resnext_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=(2 if downsample else 1),
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                in_channels = out_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_resnext(blocks,
                cardinality,
                bottleneck_width,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (network depth).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Units per stage for each supported depth.
    depth_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    # Each unit holds 3 convs, plus 2 layers outside the stages.
    assert (sum(layers) * 3 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    net = ResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        # `None` is falsy, so a single truthiness test covers both cases.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def resnext14_16x4d(**kwargs):
    """
    Build the ResNeXt-14 (16x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d")
    return get_resnext(**config, **kwargs)
def resnext14_32x2d(**kwargs):
    """
    Build the ResNeXt-14 (32x2d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d")
    return get_resnext(**config, **kwargs)
def resnext14_32x4d(**kwargs):
    """
    Build the ResNeXt-14 (32x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d")
    return get_resnext(**config, **kwargs)
def resnext26_16x4d(**kwargs):
    """
    Build the ResNeXt-26 (16x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d")
    return get_resnext(**config, **kwargs)
def resnext26_32x2d(**kwargs):
    """
    Build the ResNeXt-26 (32x2d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d")
    return get_resnext(**config, **kwargs)
def resnext26_32x4d(**kwargs):
    """
    Build the ResNeXt-26 (32x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d")
    return get_resnext(**config, **kwargs)
def resnext38_32x4d(**kwargs):
    """
    Build the ResNeXt-38 (32x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d")
    return get_resnext(**config, **kwargs)
def resnext50_32x4d(**kwargs):
    """
    Build the ResNeXt-50 (32x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d")
    return get_resnext(**config, **kwargs)
def resnext101_32x4d(**kwargs):
    """
    Build the ResNeXt-101 (32x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d")
    return get_resnext(**config, **kwargs)
def resnext101_64x4d(**kwargs):
    """
    Build the ResNeXt-101 (64x4d) network from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to fetch and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.

    Returns:
    -------
    functor
        Graph-building functor with extra fields.
    """
    config = dict(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d")
    return get_resnext(**config, **kwargs)
def _test():
    """
    Smoke-test all ResNeXt variants: build each graph, compare the trainable
    parameter count against a hard-coded reference value, then run one dummy
    batch through a session and check the logits shape.

    NOTE(review): this harness uses the TensorFlow 1.x API (`tf.placeholder`,
    `tf.Session`, `tf.global_variables_initializer`, `tf.reset_default_graph`)
    and depends on the implicit default-graph state, so statement order is
    significant and the code is left byte-identical.
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        resnext14_16x4d,
        resnext14_32x2d,
        resnext14_32x4d,
        resnext26_16x4d,
        resnext26_32x2d,
        resnext26_32x4d,
        resnext38_32x4d,
        resnext50_32x4d,
        resnext101_32x4d,
        resnext101_64x4d,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Symbolic input: NCHW or NHWC depending on data_format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count for the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts per variant (vacuously true for others).
        assert (model != resnext14_16x4d or weight_count == 7127336)
        assert (model != resnext14_32x2d or weight_count == 7029416)
        assert (model != resnext14_32x4d or weight_count == 9411880)
        assert (model != resnext26_16x4d or weight_count == 10119976)
        assert (model != resnext26_32x2d or weight_count == 9924136)
        assert (model != resnext26_32x4d or weight_count == 15389480)
        assert (model != resnext38_32x4d or weight_count == 21367080)
        assert (model != resnext50_32x4d or weight_count == 25028904)
        assert (model != resnext101_32x4d or weight_count == 44177704)
        assert (model != resnext101_64x4d or weight_count == 83455272)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Forward one zero-filled image and check the logits shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts clean.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 18,384 | 30.320273 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/senet.py | """
SENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154']
import os
import math
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, maxpool2d, se_block, is_channels_first, flatten
def senet_bottleneck(x,
                     in_channels,
                     out_channels,
                     strides,
                     cardinality,
                     bottleneck_width,
                     training,
                     data_format,
                     name="senet_bottleneck"):
    """
    SENet bottleneck block for the residual path of a SENet unit:
    1x1 reduce (to half the group width) -> grouped 3x3 -> 1x1 expand
    (no final activation).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_bottleneck'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Grouped stage width: `cardinality` groups, each
    # floor((out_channels / 4) * bottleneck_width / 64) channels wide.
    mid_channels = out_channels // 4
    per_group_width = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * per_group_width
    # SENet narrows the first 1x1 conv to half the group width.
    narrow_width = group_width // 2
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=narrow_width,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=narrow_width,
        out_channels=group_width,
        strides=strides,
        groups=cardinality,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # No activation here: the enclosing unit applies ReLU after the
    # residual addition.
    y = conv1x1_block(
        x=y,
        in_channels=group_width,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return y
def senet_unit(x,
               in_channels,
               out_channels,
               strides,
               cardinality,
               bottleneck_width,
               identity_conv3x3,
               training,
               data_format,
               name="senet_unit"):
    """
    SENet unit: bottleneck body + squeeze-and-excitation gate + residual add.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    identity_conv3x3 : bool, default False
        Whether to use 3x3 convolution in the identity link.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if (in_channels == out_channels) and (strides == 1):
        # Shapes already match, so the raw input can be used as the shortcut.
        identity = x
    else:
        # Project the shortcut so it matches the body's output shape.
        proj_block = conv3x3_block if identity_conv3x3 else conv1x1_block
        identity = proj_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")

    x = senet_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        training=training,
        data_format=data_format,
        name=name + "/body")
    x = se_block(
        x=x,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    x = x + identity
    x = tf.nn.relu(x, name=name + "/activ")
    return x
def senet_init_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="senet_init_block"):
    """
    SENet specific initial block: a 3-conv stem followed by max pooling.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // 2
    # (in_channels, out_channels, strides, name suffix) for each stem conv.
    stem_convs = [
        (in_channels, mid_channels, 2, "/conv1"),
        (mid_channels, mid_channels, 1, "/conv2"),
        (mid_channels, out_channels, 1, "/conv3"),
    ]
    for src_channels, dst_channels, conv_strides, suffix in stem_convs:
        x = conv3x3_block(
            x=x,
            in_channels=src_channels,
            out_channels=dst_channels,
            strides=conv_strides,
            training=training,
            data_format=data_format,
            name=name + suffix)
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return x
class SENet(object):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SENet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        x = senet_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_id, stage_channels in enumerate(self.channels):
            # The very first stage projects its identity with a 1x1 conv.
            use_conv3x3_identity = (stage_id != 0)
            for unit_id, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except stage 1.
                unit_strides = 2 if (unit_id == 0) and (stage_id != 0) else 1
                x = senet_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=unit_strides,
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    identity_conv3x3=use_conv3x3_identity,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_id + 1, unit_id + 1))
                in_channels = out_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dropout(
            rate=0.2,
            name="output/dropout")(
            inputs=x,
            training=training)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output/fc")(x)
        return x
def get_senet(blocks,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SENet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Mapping: depth -> (units per stage, cardinality).
    depth_configs = {
        16: ([1, 1, 1, 1], 32),
        28: ([2, 2, 2, 2], 32),
        40: ([3, 3, 3, 3], 32),
        52: ([3, 4, 6, 3], 32),
        103: ([3, 4, 23, 3], 32),
        154: ([3, 8, 36, 3], 64),
    }
    if blocks not in depth_configs:
        raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks))
    layers, cardinality = depth_configs[blocks]

    bottleneck_width = 4
    init_block_channels = 128
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SENet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def senet16(**kwargs):
    """
    Construct the SENet-16 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet16", blocks=16, **kwargs)
def senet28(**kwargs):
    """
    Construct the SENet-28 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet28", blocks=28, **kwargs)
def senet40(**kwargs):
    """
    Construct the SENet-40 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet40", blocks=40, **kwargs)
def senet52(**kwargs):
    """
    Construct the SENet-52 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet52", blocks=52, **kwargs)
def senet103(**kwargs):
    """
    Construct the SENet-103 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet103", blocks=103, **kwargs)
def senet154(**kwargs):
    """
    Construct the SENet-154 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(model_name="senet154", blocks=154, **kwargs)
def _test():
    """
    Smoke test: build every SENet variant, verify parameter counts, and run
    one zero-valued forward pass.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts for each model constructor.
    expected_weight_counts = {
        senet16: 31366168,
        senet28: 36453768,
        senet40: 41541368,
        senet52: 44659416,
        senet103: 60963096,
        senet154: 115088984,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 16,887 | 28.16753 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenet.py | """
ShuffleNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
import tensorflow as tf
from .common import conv1x1, conv3x3, depthwise_conv3x3, batchnorm, channel_shuffle, maxpool2d, avgpool2d,\
is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNet unit: grouped 1x1 compress, channel shuffle, depthwise 3x3,
    grouped 1x1 expand, then residual add or downsampling concat.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // 4
    if downsample:
        # The pooled identity is concatenated back afterwards, so the residual
        # branch only needs to produce the remaining channels.
        out_channels -= in_channels
    identity = x

    first_conv_groups = 1 if ignore_group else groups
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=first_conv_groups,
        data_format=data_format,
        name=name + "/compress_conv1")
    x = batchnorm(x=x, training=training, data_format=data_format, name=name + "/compress_bn1")
    x = tf.nn.relu(x, name=name + "/activ")

    x = channel_shuffle(x=x, groups=groups, data_format=data_format)

    x = depthwise_conv3x3(
        x=x,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    x = batchnorm(x=x, training=training, data_format=data_format, name=name + "/dw_bn2")

    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        data_format=data_format,
        name=name + "/expand_conv3")
    x = batchnorm(x=x, training=training, data_format=data_format, name=name + "/expand_bn3")

    if downsample:
        identity = avgpool2d(
            x=identity,
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name=name + "/avgpool")
        x = tf.concat([x, identity], axis=get_channel_axis(data_format), name=name + "/concat")
    else:
        x = x + identity
    x = tf.nn.relu(x, name=name + "/final_activ")
    return x
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    ShuffleNet specific initial block: strided 3x3 conv + BN + ReLU + max pool.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = conv3x3(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        data_format=data_format,
        name=name + "/conv")
    x = batchnorm(x=x, training=training, data_format=data_format, name=name + "/bn")
    x = tf.nn.relu(x, name=name + "/activ")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return x
class ShuffleNet(object):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        x = shuffle_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_id, stage_channels in enumerate(self.channels):
            for unit_id, out_channels in enumerate(stage_channels):
                # Every stage downsamples in its first unit; groups are
                # ignored only in the very first unit of the network.
                is_first_unit = (unit_id == 0)
                x = shuffle_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    groups=self.groups,
                    downsample=is_first_unit,
                    ignore_group=(stage_id == 0) and is_first_unit,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_id + 1, unit_id + 1))
                in_channels = out_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.

    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers.
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-stage output widths for every supported number of groups.
    stage_widths_by_groups = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in stage_widths_by_groups:
        raise ValueError("The {} of groups is not supported".format(groups))
    channels_per_layers = stage_widths_by_groups[groups]

    init_block_channels = 24
    layers = [4, 8, 4]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)

    net = ShuffleNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def shufflenet_g1_w1(**kwargs):
    """
    Construct the ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g1_w1", groups=1, width_scale=1.0, **kwargs)
def shufflenet_g2_w1(**kwargs):
    """
    Construct the ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g2_w1", groups=2, width_scale=1.0, **kwargs)
def shufflenet_g3_w1(**kwargs):
    """
    Construct the ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g3_w1", groups=3, width_scale=1.0, **kwargs)
def shufflenet_g4_w1(**kwargs):
    """
    Construct the ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g4_w1", groups=4, width_scale=1.0, **kwargs)
def shufflenet_g8_w1(**kwargs):
    """
    Construct the ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g8_w1", groups=8, width_scale=1.0, **kwargs)
def shufflenet_g1_w3d4(**kwargs):
    """
    Construct the ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g1_w3d4", groups=1, width_scale=0.75, **kwargs)
def shufflenet_g3_w3d4(**kwargs):
    """
    Construct the ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g3_w3d4", groups=3, width_scale=0.75, **kwargs)
def shufflenet_g1_wd2(**kwargs):
    """
    Construct the ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g1_wd2", groups=1, width_scale=0.5, **kwargs)
def shufflenet_g3_wd2(**kwargs):
    """
    Construct the ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g3_wd2", groups=3, width_scale=0.5, **kwargs)
def shufflenet_g1_wd4(**kwargs):
    """
    Construct the ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g1_wd4", groups=1, width_scale=0.25, **kwargs)
def shufflenet_g3_wd4(**kwargs):
    """
    Construct the ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural
    Network for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(model_name="shufflenet_g3_wd4", groups=3, width_scale=0.25, **kwargs)
def _test():
    """
    Smoke test: build every ShuffleNet variant, verify parameter counts, and
    run one zero-valued forward pass.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts for each model constructor.
    expected_weight_counts = {
        shufflenet_g1_w1: 1531936,
        shufflenet_g2_w1: 1733848,
        shufflenet_g3_w1: 1865728,
        shufflenet_g4_w1: 1968344,
        shufflenet_g8_w1: 2434768,
        shufflenet_g1_w3d4: 975214,
        shufflenet_g3_w3d4: 1238266,
        shufflenet_g1_wd2: 534484,
        shufflenet_g3_wd2: 718324,
        shufflenet_g1_wd4: 209746,
        shufflenet_g3_wd4: 305902,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 19,344 | 30.151369 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/common.py | """
Common routines for models in TensorFlow.
"""
__all__ = ['round_channels', 'hswish', 'is_channels_first', 'get_channel_axis', 'flatten', 'batchnorm', 'maxpool2d',
'avgpool2d', 'conv2d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'conv_block', 'conv1x1_block',
'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'pre_conv_block',
'pre_conv1x1_block', 'pre_conv3x3_block', 'se_block', 'channel_shuffle', 'channel_shuffle2']
import math
import numpy as np
import tensorflow as tf
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns:
    -------
    int
        Weighted number of channels.
    """
    # Round to the nearest multiple of `divisor`, never below `divisor`.
    quantized = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    # Do not let the rounding shave off more than 10% of the requested width.
    if quantized < 0.9 * channels:
        quantized += divisor
    return quantized
def hsigmoid(x,
             name="hsigmoid"):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    name : str, default 'hsigmoid'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # relu6(x + 3) / 6 is a piecewise-linear approximation of sigmoid.
    clipped = tf.nn.relu6(x + 3.0, name=name)
    return clipped / 6.0
def hswish(x,
           name="hswish"):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    name : str, default 'hswish'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # x * relu6(x + 3) / 6 -- the hard analogue of x * sigmoid(x).
    clipped = tf.nn.relu6(x + 3.0, name=name)
    return x * clipped / 6.0
def get_activation_layer(x,
                         activation,
                         name="activ"):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    activation : function or str
        Activation function or name of activation function.
    name : str, default 'activ'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (activation is not None)
    if not isinstance(activation, str):
        # Callable: apply it directly.
        return activation(x)
    if activation == "relu":
        return tf.nn.relu(x, name=name)
    if activation == "relu6":
        return tf.nn.relu6(x, name=name)
    if activation == "hswish":
        return hswish(x, name=name)
    raise NotImplementedError()
def is_channels_first(data_format):
    """
    Is tested data format channels first.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        A flag.
    """
    channels_first = (data_format == "channels_first")
    return channels_first
def get_channel_axis(data_format):
    """
    Get channel axis.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    int
        Channel axis.
    """
    # NCHW -> axis 1; NHWC -> last axis.
    if data_format == "channels_first":
        return 1
    return -1
def flatten(x,
            data_format):
    """
    Flattens the input to two dimensional.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if not is_channels_first(data_format):
        # Bring channels ahead of the spatial dims so the flattened layout
        # matches the channels_first ordering.
        x = tf.transpose(x, perm=(0, 3, 1, 2))
    flat_size = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, shape=(-1, flat_size))
def batchnorm(x,
              momentum=0.9,
              epsilon=1e-5,
              training=False,
              data_format="channels_last",
              name=None):
    """
    Batch normalization layer.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    bn_layer = tf.keras.layers.BatchNormalization(
        axis=get_channel_axis(data_format),
        momentum=momentum,
        epsilon=epsilon,
        name=name)
    return bn_layer(
        inputs=x,
        training=training)
def maxpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Max pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    if ceil_mode:
        # Bug fix: the spatial axes depend on the data format. The previous
        # code always read x.shape[2]/x.shape[3], which are (H, W) only for
        # channels_first; for channels_last those indices are (W, C).
        height_axis, width_axis = (2, 3) if is_channels_first(data_format) else (1, 2)
        height = int(x.shape[height_axis])
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            # Grow the bottom/right padding so the last partial window is kept.
            padding = (padding[0] + 1, padding[1])
        width = int(x.shape[width_axis])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)

    if (padding[0] > 0) or (padding[1] > 0):
        # NOTE(review): REFLECT padding is unusual for max pooling (zeros or
        # -inf padding are more common); kept as in the original code.
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="REFLECT")
        else:
            x = tf.pad(x, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="REFLECT")

    x = tf.keras.layers.MaxPooling2D(
        pool_size=pool_size,
        strides=strides,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    return x
def avgpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Average pooling operation for two dimensional (spatial) data.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for the pooling layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    if ceil_mode:
        # Read the spatial dims according to the data format. The original code
        # always used shape[2]/shape[3] (channels_first indexing), which broke
        # ceil_mode for channels_last inputs.
        if is_channels_first(data_format):
            height = int(x.shape[2])
            width = int(x.shape[3])
        else:
            height = int(x.shape[1])
            width = int(x.shape[2])
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)

    if (padding[0] > 0) or (padding[1] > 0):
        # Zero (CONSTANT) padding is included in the averaging windows
        # (count_include_pad semantics).
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="CONSTANT")
        else:
            x = tf.pad(x, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="CONSTANT")

    # Pool with stride 1 first and subsample afterwards, so that right/bottom
    # edge windows stay full-size regardless of the stride.
    x = tf.keras.layers.AveragePooling2D(
        pool_size=pool_size,
        strides=1,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    if (strides[0] > 1) or (strides[1] > 1):
        # Guard the name concatenation: the original raised TypeError here when
        # name was None (the documented default).
        x = tf.keras.layers.AveragePooling2D(
            pool_size=1,
            strides=strides,
            padding="valid",
            data_format=data_format,
            name=(None if name is None else name + "/stride"))(x)
    return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           dilation=1,
           groups=1,
           use_bias=True,
           data_format="channels_last",
           name="conv2d"):
    """
    Convolution 2D layer wrapper with explicit (PyTorch-style) padding and support
    for depthwise and grouped convolution.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector. (Not supported in the depthwise branch.)
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv2d'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)

    # Apply explicit zero padding; every convolution below then runs "valid".
    if (padding[0] > 0) or (padding[1] > 0):
        if is_channels_first(data_format):
            paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
        else:
            paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]
        x = tf.pad(x, paddings=paddings_tf)

    if groups == 1:
        # Ordinary (dense) convolution.
        x = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding="valid",
            data_format=data_format,
            dilation_rate=dilation,
            use_bias=use_bias,
            kernel_initializer=tf.keras.initializers.VarianceScaling(2.0),
            name=name)(x)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Depthwise convolution: one filter per input channel.
        assert (dilation[0] == 1) and (dilation[1] == 1)
        kernel = tf.compat.v1.get_variable(
            name=name + "/dw_kernel",
            shape=kernel_size + (in_channels, 1),
            initializer=tf.keras.initializers.VarianceScaling(2.0))
        x = tf.nn.depthwise_conv2d(
            input=x,
            filter=kernel,
            strides=(1, 1) + strides if is_channels_first(data_format) else (1,) + strides + (1,),
            padding="VALID",
            rate=(1, 1),
            name=name,
            data_format="NCHW" if is_channels_first(data_format) else "NHWC")
        if use_bias:
            raise NotImplementedError
    else:
        # Generic grouped convolution emulated by slicing the input channel-wise
        # and concatenating the per-group outputs.
        assert (in_channels % groups == 0)
        assert (out_channels % groups == 0)
        in_group_channels = in_channels // groups
        out_group_channels = out_channels // groups
        group_list = []
        for gi in range(groups):
            if is_channels_first(data_format):
                xi = x[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :]
            else:
                xi = x[:, :, :, gi * in_group_channels:(gi + 1) * in_group_channels]
            # Use tf.keras.initializers.VarianceScaling for consistency with the
            # dense branch above; tf.contrib.layers.variance_scaling_initializer
            # no longer exists (tf.contrib was removed in TF 2.x).
            xi = tf.keras.layers.Conv2D(
                filters=out_group_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding="valid",
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                kernel_initializer=tf.keras.initializers.VarianceScaling(2.0),
                name=name + "/convgroup{}".format(gi + 1))(xi)
            group_list.append(xi)
        x = tf.concat(group_list, axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def conv1x1(x,
            in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv1x1"):
    """
    Pointwise (1x1) convolution layer: a thin wrapper that fixes the window size
    and padding of `conv2d`.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A 1x1 window needs no spatial padding, so padding is pinned to 0 explicitly.
    return conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        dilation=1,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
def conv3x3(x,
            in_channels,
            out_channels,
            strides=1,
            padding=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv3x3"):
    """
    3x3 convolution layer: a thin wrapper that fixes the window size of `conv2d`.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Default padding of 1 keeps the spatial size unchanged for stride 1.
    return conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=1,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
def depthwise_conv3x3(x,
                      channels,
                      strides,
                      data_format="channels_last",
                      name="depthwise_conv3x3"):
    """
    Depthwise 3x3 convolution layer (one filter per channel, no bias).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'depthwise_conv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # groups == in_channels == out_channels routes conv2d into its depthwise branch.
    return conv2d(
        x=x,
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=channels,
        use_bias=False,
        data_format=data_format,
        name=name)
def conv_block(x,
               in_channels,
               out_channels,
               kernel_size,
               strides,
               padding,
               dilation=1,
               groups=1,
               use_bias=False,
               use_bn=True,
               activation="relu",
               training=False,
               data_format="channels_last",
               name="conv_block"):
    """
    Standard convolution block: convolution, then optional batch normalization,
    then an optional activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function; None skips activation.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name + "/conv")
    if use_bn:
        out = batchnorm(
            x=out,
            training=training,
            data_format=data_format,
            name=name + "/bn")
    if activation is None:
        return out
    return get_activation_layer(
        x=out,
        activation=activation,
        name=name + "/activ")
def conv1x1_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  groups=1,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv1x1_block"):
    """
    1x1 version of the standard convolution block (conv + BN + activation).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # 1x1 window, zero padding; BatchNorm stays at conv_block's default (enabled).
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        dilation=1,
        groups=groups,
        use_bias=use_bias,
        use_bn=True,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv3x3_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    3x3 version of the standard convolution block (conv + optional BN + activation).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv5x5_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    5x5 version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer. (Added for consistency with `conv3x3_block`;
        the default matches the previous hard-coded behavior.)
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name. NOTE(review): the default looks like a copy-paste from the 3x3
        variant; it is kept unchanged because it feeds TF variable scopes and
        renaming it could break loading of existing checkpoints — confirm before
        changing.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv7x7_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv7x7_block"):
    """
    7x7 version of the standard convolution block (conv + BN + activation).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv7x7_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Default padding of 3 keeps the spatial size unchanged for stride 1.
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def dwconv3x3_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    3x3 depthwise version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Setting groups to the channel count turns the 3x3 block depthwise.
    return conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def dwconv5x5_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    5x5 depthwise version of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name. NOTE(review): the default name looks like a copy-paste from the
        3x3 variant; it is kept as-is because it feeds TF variable scopes and
        renaming it could break loading of existing checkpoints — confirm before
        changing.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # groups=out_channels makes the 5x5 convolution depthwise.
    return conv5x5_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def pre_conv_block(x,
                   in_channels,
                   out_channels,
                   kernel_size,
                   strides,
                   padding,
                   return_preact=False,
                   training=False,
                   data_format="channels_last",
                   name="pre_conv_block"):
    """
    Convolution block with Batch normalization and ReLU pre-activation
    (BN -> ReLU -> conv, as used by PreResNet).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet
        identity branches).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv_block'
        Block name.

    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor, optionally paired with the pre-activated input tensor.
    """
    # Pre-activation: normalization and ReLU are applied before the convolution.
    x_pre_activ = tf.nn.relu(
        batchnorm(
            x=x,
            training=training,
            data_format=data_format,
            name=name + "/bn"),
        name=name + "/activ")
    out = conv2d(
        x=x_pre_activ,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    if return_preact:
        return out, x_pre_activ
    return out
def pre_conv1x1_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv1x1_block"):
    """
    1x1 version of the pre-activated convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv1x1_block'
        Block name.

    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor, optionally paired with the pre-activated input tensor.
    """
    # 1x1 window needs no spatial padding.
    return pre_conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        return_preact=return_preact,
        training=training,
        data_format=data_format,
        name=name)
def pre_conv3x3_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv3x3_block"):
    """
    3x3 version of the pre-activated convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv3x3_block'
        Block name.

    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor, optionally paired with the pre-activated input tensor.
    """
    # Padding of 1 keeps the spatial size unchanged for stride 1.
    return pre_conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        return_preact=return_preact,
        training=training,
        data_format=data_format,
        name=name)
def channel_shuffle(x,
                    groups,
                    data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Splits the channel axis into (groups, channels_per_group), transposes the two
    factors, and flattens back, so channels are interleaved across groups.

    Parameters:
    ----------
    x : Tensor
        Input tensor (rank 4, with a static shape).
    groups : int
        Number of groups.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    channels_first = is_channels_first(data_format)
    if channels_first:
        _, channels, height, width = static_shape
    else:
        _, height, width, channels = static_shape
    assert (channels % groups == 0)
    channels_per_group = channels // groups

    if channels_first:
        x = tf.reshape(x, shape=(-1, groups, channels_per_group, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        x = tf.reshape(x, shape=(-1, channels, height, width))
    else:
        x = tf.reshape(x, shape=(-1, height, width, groups, channels_per_group))
        x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
        x = tf.reshape(x, shape=(-1, height, width, channels))
    return x
def channel_shuffle2(x,
                     groups,
                     data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083. The alternative version.

    Same idea as `channel_shuffle`, but the channel axis is factored as
    (channels_per_group, groups) instead of (groups, channels_per_group).

    Parameters:
    ----------
    x : Tensor
        Input tensor (rank 4, with a static shape).
    groups : int
        Number of groups.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    channels_first = is_channels_first(data_format)
    if channels_first:
        _, channels, height, width = static_shape
    else:
        _, height, width, channels = static_shape
    assert (channels % groups == 0)
    channels_per_group = channels // groups

    if channels_first:
        x = tf.reshape(x, shape=(-1, channels_per_group, groups, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        x = tf.reshape(x, shape=(-1, channels, height, width))
    else:
        x = tf.reshape(x, shape=(-1, height, width, channels_per_group, groups))
        x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
        x = tf.reshape(x, shape=(-1, height, width, channels))
    return x
def se_block(x,
             channels,
             reduction=16,
             approx_sigmoid=False,
             round_mid=False,
             activation="relu",
             data_format="channels_last",
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    x : Tensor
        Input tensor (rank 4, with a static spatial shape).
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    approx_sigmoid : bool, default False
        Whether to use the approximated (hard) sigmoid gate.
    round_mid : bool, default False
        Whether to round the middle channel number (make divisible by 8).
    activation : function or str, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'se_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (len(x.shape) == 4)
    if round_mid:
        mid_channels = round_channels(float(channels) / reduction)
    else:
        mid_channels = channels // reduction
    if is_channels_first(data_format):
        pool_size = x.shape[2:4]
    else:
        pool_size = x.shape[1:3]

    # Squeeze: global average pooling realized as a full-spatial-size window.
    attn = tf.keras.layers.AveragePooling2D(
        pool_size=pool_size,
        strides=1,
        data_format=data_format,
        name=name + "/pool")(x)
    # Excitation: bottleneck MLP implemented with two biased 1x1 convolutions.
    attn = conv1x1(
        x=attn,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv1/conv")
    attn = get_activation_layer(
        x=attn,
        activation=activation,
        name=name + "/activ")
    attn = conv1x1(
        x=attn,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv2/conv")
    # Gate each channel of the input by the learned attention weights.
    if approx_sigmoid:
        attn = hsigmoid(attn, name=name + "/hsigmoid")
    else:
        attn = tf.nn.sigmoid(attn, name=name + "/sigmoid")
    return x * attn
| 39,625 | 28.265879 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/darknet53.py | """
DarkNet-53 for ImageNet-1K, implemented in TensorFlow.
Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
"""
__all__ = ['DarkNet53', 'darknet53']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_unit(x,
              in_channels,
              out_channels,
              alpha,
              training,
              data_format,
              name="dark_unit"):
    """
    DarkNet residual unit: a 1x1 bottleneck halving the channels, a 3x3 convolution
    restoring them, and an identity shortcut.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (must be even).
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dark_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2

    shortcut = x
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation=(lambda z: tf.nn.leaky_relu(z, alpha=alpha, name=name + "/conv1/activ")),
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=(lambda z: tf.nn.leaky_relu(z, alpha=alpha, name=name + "/conv2/activ")),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return y + shortcut
class DarkNet53(object):
    """
    DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.

    The constructor only stores configuration; the TensorFlow graph is built
    lazily when the instance is called with an input tensor.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet53, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Static configuration only — no TF ops are created here.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.alpha = alpha
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor (class logits).
        """
        in_channels = self.in_channels
        # Stem: a single 3x3 conv block with Leaky ReLU.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            activation=(lambda y: tf.nn.leaky_relu(
                y,
                alpha=self.alpha,
                name="features/init_block/activ")),
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                if j == 0:
                    # First unit of each stage downsamples with a strided 3x3 conv.
                    # NOTE(review): the "/active" op-name suffix below differs from
                    # the "/activ" suffix used everywhere else — presumably a typo;
                    # kept as-is since it only names the activation op.
                    x = conv3x3_block(
                        x=x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=2,
                        activation=(lambda y: tf.nn.leaky_relu(
                            y,
                            alpha=self.alpha,
                            name="features/stage{}/unit{}/active".format(i + 1, j + 1))),
                        training=training,
                        data_format=self.data_format,
                        name="features/stage{}/unit{}".format(i + 1, j + 1))
                else:
                    # Remaining units are residual dark units at constant resolution.
                    x = dark_unit(
                        x=x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        alpha=self.alpha,
                        training=training,
                        data_format=self.data_format,
                        name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Global 7x7 average pooling (assumes a 224x224 input reduced to 7x7).
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        # Final classifier producing raw logits (no softmax).
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_darknet53(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create DarkNet-53 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DarkNet53
        Constructed network (with `state_dict`/`file_path` attributes attached).
    """
    # Stage configuration of DarkNet-53: 5 stages of residual units.
    init_block_channels = 32
    layers = [2, 3, 9, 9, 5]
    channels_per_layers = [64, 128, 256, 512, 1024]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = DarkNet53(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if not pretrained:
        net.state_dict = None
        net.file_path = None
        return net

    # `not model_name` covers both None and the empty string.
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import download_state_dict
    net.state_dict, net.file_path = download_state_dict(
        model_name=model_name,
        local_model_store_dir_path=root)
    return net
def darknet53(**kwargs):
    """
    DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,'
    https://arxiv.org/abs/1804.02767.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DarkNet53
        Constructed network.
    """
    return get_darknet53(
        model_name="darknet53",
        **kwargs)
def _test():
    """Smoke test: build each model, check its parameter count and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    for model in [darknet53]:
        net = model(pretrained=pretrained, data_format=data_format)

        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        # Verify the trainable-parameter count against the reference value.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != darknet53 or weight_count == 41609928)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            batch_shape = (1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3)
            x_value = np.zeros(batch_shape, np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 8,796 | 31.223443 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenet.py | """
MobileNet & FD-MobileNet for ImageNet-1K, implemented in TensorFlow.
Original papers:
- 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
- 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
"""
__all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'fdmobilenet_w1',
'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten
def dws_conv_block(x,
                   in_channels,
                   out_channels,
                   strides,
                   training,
                   data_format,
                   name="dws_conv_block"):
    """
    Depthwise-separable convolution block, i.e. the basic MobileNet unit:
    a 3x3 depthwise convolution followed by a 1x1 pointwise convolution,
    each with its own BatchNorm and activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the (depthwise) convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dws_conv_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Depthwise stage keeps the channel count; only the pointwise stage changes it.
    y = dwconv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        strides=strides,
        training=training,
        data_format=data_format,
        name=name + "/dw_conv")
    return conv1x1_block(
        x=y,
        in_channels=in_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/pw_conv")
class MobileNet(object):
    """
    MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861. Also this class implements FD-MobileNet from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_stage_stride : bool
        Whether stride is used at the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image. Both dimensions should be
        divisible by 32 (the overall downsampling factor of the network).
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 first_stage_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.first_stage_stride = first_stage_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        init_block_channels = self.channels[0][0]
        # Stem: a plain 3x3 convolution with stride 2.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(self.channels[1:]):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of each stage; the very first stage
                # downsamples only when `first_stage_stride` is set (FD-MobileNet).
                strides = 2 if (j == 0) and ((i != 0) or self.first_stage_stride) else 1
                x = dws_conv_block(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Global average pooling over the final feature map. The window is derived
        # from `in_size` (the feature map is in_size / 32 for both supported channel
        # configurations), generalizing the previously hard-coded 7x7 window that
        # only matched 224x224 inputs.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=(self.in_size[0] // 32, self.in_size[1] // 32),
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_mobilenet(version,
                  width_scale,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create MobileNet or FD-MobileNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MobileNet ('orig' or 'fd').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # First inner list is the stem (init block); the rest are the stages.
    if version == 'orig':
        channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]]
        first_stage_stride = False
    elif version == 'fd':
        # FD-MobileNet: faster downsampling (stride already in the first stage), fewer stages.
        channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]]
        first_stage_stride = True
    else:
        raise ValueError("Unsupported MobileNet version {}".format(version))
    if width_scale != 1.0:
        # Uniformly scale the channel count of every unit (truncating to int).
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
    net = MobileNet(
        channels=channels,
        first_stage_stride=first_stage_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        # The state dict is only attached here; a TF session initializes variables from it later.
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def mobilenet_w1(**kwargs):
    """
    MobileNet-224 with width multiplier 1.0, from 'MobileNets: Efficient Convolutional
    Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_w1", version="orig", width_scale=1.0, **kwargs)
def mobilenet_w3d4(**kwargs):
    """
    MobileNet-224 with width multiplier 0.75, from 'MobileNets: Efficient Convolutional
    Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_w3d4", version="orig", width_scale=0.75, **kwargs)
def mobilenet_wd2(**kwargs):
    """
    MobileNet-224 with width multiplier 0.5, from 'MobileNets: Efficient Convolutional
    Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_wd2", version="orig", width_scale=0.5, **kwargs)
def mobilenet_wd4(**kwargs):
    """
    MobileNet-224 with width multiplier 0.25, from 'MobileNets: Efficient Convolutional
    Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_wd4", version="orig", width_scale=0.25, **kwargs)
def fdmobilenet_w1(**kwargs):
    """
    FD-MobileNet with width multiplier 1.0, from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_w1", version="fd", width_scale=1.0, **kwargs)
def fdmobilenet_w3d4(**kwargs):
    """
    FD-MobileNet with width multiplier 0.75, from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_w3d4", version="fd", width_scale=0.75, **kwargs)
def fdmobilenet_wd2(**kwargs):
    """
    FD-MobileNet with width multiplier 0.5, from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_wd2", version="fd", width_scale=0.5, **kwargs)
def fdmobilenet_wd4(**kwargs):
    """
    FD-MobileNet with width multiplier 0.25, from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_wd4", version="fd", width_scale=0.25, **kwargs)
def _test():
    """
    Smoke test: build each listed network in a fresh TF1 graph, check its
    trainable-parameter count against the reference value, then run a single
    zero-filled batch through a session and verify the output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False
    # Reference trainable-parameter counts per model constructor.
    expected_weight_counts = {
        mobilenet_w1: 4231976,
        mobilenet_w3d4: 2585560,
        mobilenet_wd2: 1331592,
        mobilenet_wd4: 470072,
        fdmobilenet_w1: 2901288,
        fdmobilenet_w3d4: 1833304,
        fdmobilenet_wd2: 993928,
        fdmobilenet_wd4: 383160,
    }

    for model, expected_weight_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the graph so the next model starts from a clean slate.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 14,167 | 31.645161 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/darknet.py | """
DarkNet for ImageNet-1K, implemented in TensorFlow.
Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
"""
__all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19']
import os
import tensorflow as tf
from .common import conv2d, maxpool2d, conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_convYxY(x,
                 in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 training,
                 data_format,
                 name="dark_convYxY"):
    """
    DarkNet unit: either a 1x1 (pointwise) or a 3x3 convolution block, always
    followed by a Leaky ReLU activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dark_convYxY'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # The two branches differ only in kernel size, so pick the block builder first.
    conv_block_fn = conv1x1_block if pointwise else conv3x3_block
    return conv_block_fn(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        activation=(lambda y: tf.nn.leaky_relu(y, alpha=alpha, name=name + "/activ")),
        training=training,
        data_format=data_format,
        name=name)
class DarkNet(object):
    """
    DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    odd_pointwise : bool
        Whether pointwise convolution layer is used for each odd unit.
    avg_pool_size : int
        Window size of the final average pooling.
    cls_activ : bool
        Whether classification convolution layer uses an activation.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 odd_pointwise,
                 avg_pool_size,
                 cls_activ,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.odd_pointwise = odd_pointwise
        self.avg_pool_size = avg_pool_size
        self.cls_activ = cls_activ
        self.alpha = alpha
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        num_stages = len(self.channels)
        for i, stage_channels in enumerate(self.channels):
            alternating = (len(stage_channels) > 1)
            for j, out_channels in enumerate(stage_channels):
                # In multi-unit stages units alternate between 3x3 and 1x1 kernels;
                # which parity gets the pointwise (1x1) kernel is controlled by
                # `odd_pointwise`. Single-unit stages always use 3x3 convolutions.
                unit_is_odd = ((j + 1) % 2 == 1)
                x = dark_convYxY(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=self.alpha,
                    pointwise=(alternating and (unit_is_odd == self.odd_pointwise)),
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            if i != num_stages - 1:
                # Downsample between stages (no pooling after the last stage).
                x = maxpool2d(
                    x=x,
                    pool_size=2,
                    strides=2,
                    data_format=self.data_format,
                    name="features/pool{}".format(i + 1))
        # Classifier: 1x1 convolution to `classes` channels, optional activation,
        # spatial average pooling, then flattening to logits.
        x = conv2d(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            kernel_size=1,
            data_format=self.data_format,
            name="output/final_conv")
        if self.cls_activ:
            x = tf.nn.leaky_relu(x, alpha=self.alpha, name="output/final_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=self.avg_pool_size,
            strides=1,
            data_format=self.data_format,
            name="output/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_darknet(version,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create DarkNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of DarkNet ('ref', 'tiny' or '19').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if version == 'ref':
        channels = [[16], [32], [64], [128], [256], [512], [1024]]
        odd_pointwise = False
        avg_pool_size = 3
        cls_activ = True
    elif version == 'tiny':
        channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]]
        odd_pointwise = True
        avg_pool_size = 14
        cls_activ = False
    elif version == '19':
        channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512],
                    [1024, 512, 1024, 512, 1024]]
        odd_pointwise = False
        avg_pool_size = 7
        cls_activ = False
    else:
        raise ValueError("Unsupported DarkNet version {}".format(version))
    net = DarkNet(
        channels=channels,
        odd_pointwise=odd_pointwise,
        avg_pool_size=avg_pool_size,
        cls_activ=cls_activ,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        # The state dict is only attached here; a TF session initializes variables from it later.
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def darknet_ref(**kwargs):
    """
    DarkNet 'Reference' variant, from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet_ref", version="ref", **kwargs)
def darknet_tiny(**kwargs):
    """
    DarkNet 'Tiny' variant, from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet_tiny", version="tiny", **kwargs)
def darknet19(**kwargs):
    """
    DarkNet-19 variant, from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet19", version="19", **kwargs)
def _test():
    """
    Smoke test: build each listed network in a fresh TF1 graph, check its
    trainable-parameter count against the reference value, then run a single
    zero-filled batch through a session and verify the output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False
    # Reference trainable-parameter counts per model constructor.
    expected_weight_counts = {
        darknet_ref: 7319416,
        darknet_tiny: 1042104,
        darknet19: 20842376,
    }

    for model, expected_weight_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the graph so the next model starts from a clean slate.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 10,892 | 31.038235 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/alexnet.py | """
AlexNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet', 'alexnetb']
import os
import tensorflow as tf
from .common import maxpool2d, conv_block, is_channels_first, flatten
def alex_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              strides,
              padding,
              use_lrn,
              training,
              data_format,
              name="alex_conv"):
    """
    AlexNet-specific convolution block: a biased convolution without BatchNorm,
    optionally followed by local response normalization.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_lrn : bool
        Whether to use LRN layer.
    training : bool
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'alex_conv'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=True,
        use_bn=False,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    if not use_lrn:
        return y
    # Local response normalization with bias k=2, alpha=1e-4, beta=0.75.
    return tf.nn.lrn(y, bias=2, alpha=1e-4, beta=0.75)
def alex_dense(x,
               in_channels,
               out_channels,
               training,
               name="alex_dense"):
    """
    AlexNet-specific dense block: fully-connected layer, ReLU, then dropout
    (the dropout is active only while `training` is true).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'alex_dense'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (in_channels > 0)
    y = tf.keras.layers.Dense(
        units=out_channels,
        name=name + "/fc")(x)
    y = tf.nn.relu(y, name=name + "/activ")
    return tf.keras.layers.Dropout(
        rate=0.5,
        name=name + "/dropout")(
        inputs=y,
        training=training)
def alex_output_block(x,
                      in_channels,
                      classes,
                      training,
                      name="alex_output_block"):
    """
    AlexNet classifier head: two 4096-wide dense blocks (with ReLU and dropout)
    followed by a final linear projection to the class logits.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'alex_output_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = 4096
    y = alex_dense(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        training=training,
        name=name + "/fc1")
    y = alex_dense(
        x=y,
        in_channels=mid_channels,
        out_channels=mid_channels,
        training=training,
        name=name + "/fc2")
    return tf.keras.layers.Dense(
        units=classes,
        name=name + "/fc3")(y)
class AlexNet(object):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    kernel_sizes : list of list of int
        Convolution window sizes for each unit.
    strides : list of list of int or tuple/list of 2 int
        Strides of the convolution for each unit.
    paddings : list of list of int or tuple/list of 2 int
        Padding value for convolution layer for each unit.
    use_lrn : bool
        Whether to use LRN layer.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 strides,
                 paddings,
                 use_lrn,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(AlexNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.use_lrn = use_lrn
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        for i, channels_per_stage in enumerate(self.channels):
            # LRN is applied only in the first two stages, matching the original AlexNet.
            use_lrn_i = self.use_lrn and (i in [0, 1])
            for j, out_channels in enumerate(channels_per_stage):
                x = alex_conv(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=self.kernel_sizes[i][j],
                    strides=self.strides[i][j],
                    padding=self.paddings[i][j],
                    use_lrn=use_lrn_i,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            # Every stage ends with an overlapping 3x3/stride-2 max pooling (ceil mode).
            x = maxpool2d(
                x=x,
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True,
                data_format=self.data_format,
                name="features/stage{}/pool".format(i + 1))
        # Flattened feature size assumes the final feature map is 6x6 spatial.
        # This holds for 224x224 inputs with both 'a' and 'b' configs;
        # NOTE(review): confirm before using a different in_size.
        in_channels = in_channels * 6 * 6
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = alex_output_block(
            x=x,
            in_channels=in_channels,
            classes=self.classes,
            training=training,
            name="output")
        return x
def get_alexnet(version="a",
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create AlexNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of AlexNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-version architecture hyperparameters.
    configs = {
        "a": {
            "channels": [[96], [256], [384, 384, 256]],
            "kernel_sizes": [[11], [5], [3, 3, 3]],
            "strides": [[4], [1], [1, 1, 1]],
            "paddings": [[0], [2], [1, 1, 1]],
            "use_lrn": True,
        },
        "b": {
            "channels": [[64], [192], [384, 256, 256]],
            "kernel_sizes": [[11], [5], [3, 3, 3]],
            "strides": [[4], [1], [1, 1, 1]],
            "paddings": [[2], [2], [1, 1, 1]],
            "use_lrn": False,
        },
    }
    if version not in configs:
        raise ValueError("Unsupported AlexNet version {}".format(version))
    net = AlexNet(**configs[version], **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def alexnet(**kwargs):
    """
    Standard AlexNet (version 'a'), from 'One weird trick for parallelizing
    convolutional neural networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_alexnet(model_name="alexnet", **kwargs)
def alexnetb(**kwargs):
    """
    Non-standard AlexNet (version 'b', without LRN), from 'One weird trick for
    parallelizing convolutional neural networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_alexnet(version="b", model_name="alexnetb", **kwargs)
def _test():
    """
    Smoke test: build each listed network in a fresh TF1 graph, check its
    trainable-parameter count against the reference value, then run a single
    zero-filled batch through a session and verify the output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False
    # Reference trainable-parameter counts per model constructor.
    expected_weight_counts = {
        alexnet: 62378344,
        alexnetb: 61100840,
    }

    for model, expected_weight_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the graph so the next model starts from a clean slate.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 11,485 | 28.603093 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.